| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
liangz0707/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
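# --- Added sketch (not part of the original example): the docstring above
# claims that, for a single 1-d feature, PolynomialFeatures builds exactly the
# Vandermonde matrix [1, x, x**2, ..., x**degree]. A minimal check of that
# claim, reusing the imports above; variable names here are illustrative only.
x_check = np.linspace(0, 10, 5)
vander_from_poly = PolynomialFeatures(degree=3).fit_transform(x_check[:, np.newaxis])
vander_from_numpy = np.vander(x_check, N=4, increasing=True)
# Both are n_samples x (degree + 1) with columns [1, x, x**2, x**3].
assert np.allclose(vander_from_poly, vander_from_numpy)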
| bsd-3-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/pandas/tests/test_multilevel.py | 1 | 87291 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
import datetime
import itertools
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull, Timestamp
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
tm._skip_if_no_pytz()
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3, tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# GH 7112
import pytz
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz)),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz)),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
self.assertTrue(result.equals(expected))
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
self.assertTrue(result.equals(expected))
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)])
self.assertTrue(result.equals(expected))
result = midx_lv2.append(midx_lv3)
self.assertTrue(result.equals(expected))
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, datetime.datetime(2011, 1, 1, tzinfo=tz), 'A'),
(1.2, datetime.datetime(2011, 1, 2, tzinfo=tz), 'B'),
(1.3, datetime.datetime(2011, 1, 3, tzinfo=tz), 'C')]
+ expected_tuples), None)
self.assertTrue(result.equals(expected))
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = self.round_trip_pickle(frame)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEqual(result.index.names, self.frame.index.names)
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3,4)
for gen, extra in [([1.,3.,2.,5.],4.),
([1,3,2,5],4),
([Timestamp('20130101'),Timestamp('20130103'),Timestamp('20130102'),Timestamp('20130105')],Timestamp('20130104')),
(['1one','3one','2one','5one'],'4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,DataFrame('world',
index=list('def'),
columns=MultiIndex.from_tuples([('red', extra)]))],axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of 'red'
self.assertEqual(str(df2).splitlines()[0].split(),['red'])
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:,[0,2,1,3]]
assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:,[0,2,1,4,3]]
assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red',extra)] = 'world'
result = result.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assertTrue(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEqual(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assertTrue(isnull(s.values[42:65]).all())
self.assertTrue(notnull(s.values[:42]).all())
self.assertTrue(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assertTrue(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assertTrue((cp.values[:4] == 0).all())
self.assertTrue((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
sliced_a1 = df['A', '1']
sliced_a2 = df['A', '2']
sliced_b1 = df['B', '1']
assert_series_equal(sliced_a1, sliced_b1, check_names=False)
assert_series_equal(sliced_a2, sliced_b1, check_names=False)
self.assertEqual(sliced_a1.name, ('A', '1'))
self.assertEqual(sliced_a2.name, ('A', '2'))
self.assertEqual(sliced_b1.name, ('B', '1'))
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep='\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assertTrue((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assertTrue((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEqual(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEqual(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEqual(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assertTrue((result.dtypes.values == df.dtypes.values).all())
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assertTrue(com.is_integer_dtype(deleveled['prm1']))
self.assertTrue(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEqual(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with assertRaisesRegexp(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with assertRaisesRegexp(IndexError, "not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
result = df['foo'].stack()
assert_series_equal(stacked['foo'], result, check_names=False)
self.assertIs(result.name, None)
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEqual(unstacked.index.name, 'first')
self.assertEqual(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEqual(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEqual(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEqual(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with assertRaisesRegexp(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with assertRaisesRegexp(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with assertRaisesRegexp(IndexError, "not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEqual(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assertTrue((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assertFalse(np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assertFalse(swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assertTrue(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assertTrue((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assertTrue(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assertFalse(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assertFalse(index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assertTrue((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
self.assertTrue((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index._tuple_index)]
result = s['qux']
result2 = s.ix['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'b')
result = series.count(level='a')
expect = self.series.count(level=0)
assert_series_equal(result, expect, check_names=False)
self.assertEqual(result.index.name, 'a')
self.assertRaises(KeyError, series.count, 'x')
self.assertRaises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
self.assertTrue(leftside._get_axis(axis).equals(level_index))
self.assertTrue(rightside._get_axis(axis).equals(level_index))
assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10),
np.tile(np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
assert_frame_equal(result, expected, check_names=False) # TODO groupby with level_values drops names
self.assertEqual(result.index.names, self.ymd.index.names[:2])
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df.consolidate()
def test_ix_preserve_names(self):
result = self.ymd.ix[2000]
result2 = self.ymd['A'].ix[2000]
self.assertEqual(result.index.names, self.ymd.index.names[1:])
self.assertEqual(result2.index.names, self.ymd.index.names[1:])
result = self.ymd.ix[2000, 2]
result2 = self.ymd['A'].ix[2000, 2]
self.assertEqual(result.index.name, self.ymd.index.names[2])
self.assertEqual(result2.index.name, self.ymd.index.names[2])
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.ix[2000, 4] = 0
exp.ix[2000, 4].values[:] = 0
assert_frame_equal(df, exp)
df['A'].ix[2000, 4] = 1
exp['A'].ix[2000, 4].values[:] = 1
assert_frame_equal(df, exp)
df.ix[2000] = 5
exp.ix[2000].values[:] = 5
assert_frame_equal(df, exp)
# this works...for now
df['A'].ix[14] = 5
self.assertEqual(df['A'][14], 5)
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
self.assertEqual(unstacked['A', 1].dtype, np.float64)
self.assertEqual(unstacked['E', 1].dtype, np.object_)
self.assertEqual(unstacked['F', 1].dtype, np.float64)
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
self.assertEqual(result.shape, (500, 2))
# test roundtrip
stacked = result.stack()
assert_series_equal(s,
stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
self.assertEqual(result.shape, (500, 2))
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)]
+ [labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
self.assertEqual(result.shape, (500, 2))
def test_getitem_lowerdim_corner(self):
self.assertRaises(KeyError, self.frame.ix.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.ix[('bar','three'),'B'] = 0
self.assertEqual(self.frame.sortlevel().ix[('bar','three'),'B'], 0)
#----------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
raise nose.SkipTest("skipping for now")
result = self.ymd.ix[2000, 0]
expected = self.ymd.ix[2000]['A']
assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.ix[2000, 0] = 0
# self.assertTrue((self.ymd.ix[2000]['A'] == 0).all())
# Pretty sure the second (and maybe even the first) is already wrong.
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6))
self.assertRaises(Exception, self.ymd.ix.__getitem__, (2000, 6), 0)
#----------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0),
('foo', 'qux', 0)],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.ix[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertRaises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.ix[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'),
('foo', 'qux')],
[0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.ix[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = frame.ix[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
self.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
assert_frame_equal(result, expected)
def test_mixed_depth_get(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'a')
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, ('routine1', 'result1'))
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.ix[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
assert_series_equal(expected, result, check_names=False)
assert_frame_equal(df1, df2)
self.assertEqual(result.name, 'a')
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
assert_frame_equal(expected, result)
assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.ix[[0, 1, 2, 7, 8, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
assert_frame_equal(result, expected.T)
result = self.frame.ix[['foo', 'qux']]
assert_frame_equal(result, expected)
result = self.frame['A'].ix[['foo', 'qux']]
assert_series_equal(result, expected['A'])
result = self.frame.T.ix[:, ['foo', 'qux']]
assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix[['foo', 'bar']] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.ix['foo':'bar'] = 0
expected.ix['foo'] = 0
expected.ix['bar'] = 0
assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]]
assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]]
assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.ix[[0, 1, 2, 5, 6]].T
assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.ix[[0, 2, 3, 6, 7, 9]].T
assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
self.assertEqual(result.index.names, ('one', 'two'))
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples(
[('1a', '2a'), ('1a', '2b'), ('1a', '2c')])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
self.assertTrue(df['new'].isnull().all())
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.ix[subset] = 99
self.assertTrue((self.frame.ix[subset].values == 99).all())
col = self.frame['B']
col[subset] = 97
self.assertTrue((self.frame.ix[subset, 'B'] == 97).all())
def test_frame_dict_constructor_empty_series(self):
s1 = Series([1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3),
(2, 2), (2, 4)]))
s2 = Series([1, 2, 3, 4],
index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
df = DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
df = DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'),
('Colorado', 'Green')])
index = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.ix[:, 1]
exp = frame.icol(1)
tm.assert_isinstance(result, Series)
assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.ix[ix, "C"] = '_'
self.assertTrue((df.xs((1, 1))['C'] == '_').all())
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
self.assertEqual(s[("a", 5)], 5)
self.assertEqual(s[("a", 6)], 6)
self.assertEqual(s[("a", 7)], 7)
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3]])
self.assertTrue(isnull(index[4][0]))
self.assertTrue(isnull(index.values[4][0]))
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo','demo','demo','demo']
idx = MultiIndex.from_tuples(idx_tp,names = ['STK_ID','RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
self.assertEqual(len(result), 3)
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo','bar',1.0,1],['foo','bar',2.0,2],['bah','bam',3.0,3],
['bah','bam',4.0,4],['foo','bar',5.0,5],['bah','bam',6.0,6]],
columns=list('ABCD'))
df = df.set_index(['A','B'])
df = df.sortlevel(0)
expected = DataFrame([['foo','bar',1.0,1],['foo','bar',2.0,2],['foo','bar',5.0,5]],
columns=list('ABCD')).set_index(['A','B'])
result = df.loc[('foo','bar')]
assert_frame_equal(result,expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2 ,3], [1, 1, 1, 1, 2, 2]))
expected = np.array([False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([1, 2, 3, 2 ,3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(take_last=True)
tm.assert_numpy_array_equal(duplicated, expected)
self.assertTrue(duplicated.dtype == bool)
expected = MultiIndex.from_arrays(([2, 3, 1, 2 ,3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(take_last=True), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M', tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'], tz='Asia/Tokyo')
self.assertTrue(idx.levels[0].equals(expected1))
self.assertTrue(idx.levels[1].equals(idx2))
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product([date1,date2,date3],[date1,date2,date3]):
index = pd.MultiIndex.from_product([[d1],[d2]])
self.assertIsInstance(index.levels[0],pd.DatetimeIndex)
self.assertIsInstance(index.levels[1],pd.DatetimeIndex)
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame({'label':['a', 'a', 'a', 'b', 'b', 'b'],
'datetime':['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value':range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00', '2011-07-19 09:00:00'])
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
self.assertTrue(df.index.levels[0].equals(expected))
self.assertTrue(df.index.levels[1].equals(pd.Index(['a', 'b'])))
df = df.swaplevel(0, 1)
self.assertTrue(df.index.levels[0].equals(pd.Index(['a', 'b'])))
self.assertTrue(df.index.levels[1].equals(expected))
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'], tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-02 09:00', '2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'], tz='US/Eastern')
self.assertTrue(df.index.levels[0].equals(expected1))
self.assertTrue(df.index.levels[1].equals(expected2))
self.assertTrue(df.index.levels[2].equals(idx3))
# GH 7092
self.assertTrue(df.index.get_level_values(0).equals(idx1))
self.assertTrue(df.index.get_level_values(1).equals(idx2))
self.assertTrue(df.index.get_level_values(2).equals(idx3))
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx1')
idx2 = pd.Index(range(5), name='idx2',dtype='int64')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame({'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5,dtype='int64'),
'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS', tz='Europe/Paris', name='idx3')
idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3])
df = pd.DataFrame({'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5,dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5,dtype='int64'), 'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(lambda d: pd.Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(lambda d: pd.Timestamp(d, tz='Europe/Paris'))
assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = pd.MultiIndex.from_product([['a','b'], pd.date_range('20130101', periods=3, tz=tz)])
df = pd.DataFrame(np.arange(6,dtype='int64').reshape(6,1), columns=['a'], index=idx)
expected = pd.DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(lambda d: pd.Timestamp(d, offset='D', tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = pd.MultiIndex.from_product([pd.period_range('20130101', periods=3, freq='M'),
['a','b','c']], names=['month', 'feature'])
df = pd.DataFrame(np.arange(9,dtype='int64').reshape(-1,1), index=idx, columns=['a'])
expected = pd.DataFrame({'month': [pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3,
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')},
columns=['month', 'feature', 'a'])
assert_frame_equal(df.reset_index(), expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='Y')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
self.assertTrue(df.index.levels[0].equals(expected1))
self.assertTrue(df.index.levels[1].equals(expected2))
self.assertTrue(df.index.levels[2].equals(idx3))
self.assertTrue(df.index.get_level_values(0).equals(idx1))
self.assertTrue(df.index.get_level_values(1).equals(idx2))
self.assertTrue(df.index.get_level_values(2).equals(idx3))
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = pd.MultiIndex.from_tuples([(1, 2), (3, 4),
(5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = pd.Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data),)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
zoranzhao/NoSSim | NoS_ECG/ga_dse.py | 1 | 7996 | #!/usr/bin/env python2.7
# NoSSim exploration framework
# Author: Zhuoran Zhao
# Date: 2017/03/18
# This GA-based exploration is built on the DEAP library
# Single- and multi-objective GA runs are encapsulated in separate functions main_ga, main_moga
import random
import array
import json
import numpy
import pprint
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence
import explore
#cli_num = 6
#{
# "protocol": config_vec[0], #0-2
# "srv_type":config_vec[1],#0-2
# "srv_core_num": config_vec[2],#1-2
#}#min [0,1,1] max [2,2,2]
#offload_list = #min [0,0,0,0,0,0]# max [3,3,3,3,3,3]
#cli_type_list = #min [0,0,0,0,0,0]# max [1,1,1,1,1,1]
#cli_core_num_list = [1,1,1,1,1,1]
#min = [0,1,1, 0,0,0,0,0,0, 0,0,0,0,0,0]
#max = [2,2,2, 3,3,3,3,3,3, 1,1,1,1,1,1]
#result = explore.evaluate(6, individual[0:3], individual[3:9], individual[9:15], [1,1,1,1,1,1])
#result = explore.evaluate(cli_num, config_vec, offload_list, cli_type_list, cli_core_num_list)
#return 1/result["o2o_delay"] , result["cli"]["lwip_core"]+result["cli"]["app_core"]
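# Illustrative decoding (a sketch added for clarity, not from the original script):
# an in-bounds individual such as
#   [1, 2, 1,  0, 3, 1, 0, 2, 0,  0, 1, 0, 1, 0, 1]
# would split into config_vec = [1, 2, 1] (protocol, srv_type, srv_core_num),
# offload_list = [0, 3, 1, 0, 2, 0] and cli_type_list = [0, 1, 0, 1, 0, 1],
# with cli_core_num_list fixed at [1, 1, 1, 1, 1, 1] as in evalOneMax below.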
creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMulti)
toolbox = base.Toolbox()
knobs_low = [0,1,1, 0,0,0,0,0,0, 0,0,0,0,0,0]
knobs_up = [2,2,2, 3,3,3,3,3,3, 1,1,1,1,1,1]
# customize one integer attribute generator per knob (ranges given by knobs_low/knobs_up)
ga_data={}
for i in range(len(knobs_low)):
toolbox.register("attr_int"+str(i), random.randint, knobs_low[i], knobs_up[i])
#toolbox.register("attr_int0", random.randint, 1, 5)
#toolbox.register("attr_int1", random.randint, 1, 5)
#toolbox.register("attr_int2", random.randint, 1, 5)
#toolbox.register("attr_int3", random.randint, 1, 5)
#toolbox.register("attr_int4", random.randint, 1, 5)
#toolbox.register("attr_int5", random.randint, 1, 5)
#toolbox.register("attr_int6", random.randint, 1, 5)
#toolbox.register("attr_int7", random.randint, 10, 20)
#toolbox.register("attr_int8", random.randint, 10, 20)
#toolbox.register("attr_int9", random.randint, 10, 20)
#toolbox.register("attr_int10", random.randint, 10, 20)
#toolbox.register("attr_int11", random.randint, 10, 20)
toolbox.register("individual", tools.initCycle, creator.Individual,
(toolbox.attr_int0, toolbox.attr_int1, toolbox.attr_int2, toolbox.attr_int3, toolbox.attr_int4,
toolbox.attr_int5, toolbox.attr_int6, toolbox.attr_int7, toolbox.attr_int8, toolbox.attr_int9,
toolbox.attr_int10, toolbox.attr_int11,toolbox.attr_int12, toolbox.attr_int13,toolbox.attr_int14),
n=1)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
def evalOneMax(individual):
result = explore.evaluate(6, individual[0:3], individual[3:9], individual[9:15], [1,1,1,1,1,1])
#result = explore.evaluate(cli_num, config_vec, offload_list, cli_type_list, cli_core_num_list)
return 1/result["o2o_delay"] , result["cli"]["lwip_core"]+result["cli"]["app_core"]
#return sum(individual[0:8]), sum(individual[6:len(individual)])
#----------
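# Smoke-test sketch (hypothetical; assumes explore.evaluate is importable here):
# evaluating any in-bounds candidate, e.g.
#   fit = evalOneMax([1, 1, 1] + [0] * 6 + [0] * 6)
# should yield a 2-tuple (1/o2o_delay, client core count), matching the
# (maximize, minimize) weights declared in FitnessMulti above.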
def mainNSGA(seed=None):
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=knobs_low, up=knobs_up, indpb=0.05)
toolbox.register("select", tools.selNSGA2)
random.seed(seed)
# MU is the size of population, total number of individuals
# in each generation
#
# CXPB is the probability with which two individuals
# are crossed
#
# MUTPB is the probability for mutating an individual
#
# NGEN is the number of generations for which the
# evolution runs
MU = 200
CXPB = 0.8
MUTPB = 0.8
NGEN = 20
pop = toolbox.population(n=MU)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
print(" ====== Beginning generation %i ======" % gen)
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
del ind1.fitness.values, ind2.fitness.values
if random.random() <= MUTPB:
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
fitness_list = []
fronts_lists = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]
fronts=[]
for i in range(len(fronts_lists)):
if fronts_lists[i] not in fronts:
fronts.append(fronts_lists[i])
fitness_list.append(fronts_lists[i].fitness.values)
print(" Pareto front is:")
ga_data[gen]={"fitness":fitness_list,"front":fronts}
pprint.pprint( fitness_list )
pprint.pprint( fronts )
print(" Evaluated %i individuals\n" % len(invalid_ind))
jsonConfigFile="./ga_data.json"
with open(jsonConfigFile,"w") as jFile:
json.dump(ga_data, jFile, indent=4, separators=(',', ': '))
print("-- End of (successful) evolution --")
return pop
import matplotlib.pyplot as plt
def plot(cli_core_list, QoS_list):
colorsred = [[1,0.96,0.96], [1,0.9,0.9], [1,0.7,0.7], [1,0.5,0.5], [1,0.3,0.3],[1,0.1,0.1], [0.9,0,0]]
colorsgreen = ['darkgreen', 'seagreen', 'limegreen', 'springgreen']
colorsblue =[[0.8,0.8,0.9],[0.6,0.6,0.9], [0.4,0.4,0.9], [0.2,0.2,0.9],[0,0,0.9]]
colorspurple =['indigo', 'purple', 'blueviolet', 'mediumorchid']
colors = colorsred+colorsgreen+colorsblue+colorspurple
label_size=30
font_size=22
legend_size=16
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
i=0
for cli_core, QoS in zip(cli_core_list, QoS_list):
d1 = ax.scatter (cli_core, QoS, s=20, marker="s", color=colors[i%(len(colors))])
i=i+1
plt.gca().invert_yaxis()
#plt.gca().invert_xaxis()
plt.ylabel('Throughput', fontsize=label_size)
#plt.xlabel('Client core utilization', fontsize=24)
plt.xticks(fontsize=font_size)
plt.yticks(fontsize=font_size)
#plt.xlim([0,0.8])
#plt.ylim([30,0])
vals = ax.get_xticks()
#ax.set_xticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#ax.set_xlabel('Client core utilization', fontsize=label_size)
plt.show()
if __name__ == "__main__":
#pop = mainNSGA()
metricx_list=[]
metricy_list=[]
with open('./ga_data.json') as data_file:
data = json.load(data_file)
for key in range(len(data.keys())):
#print data[key]["fitness"]
#print data[key]["front"]
metricx=[]
metricy=[]
for item in data[str(key+1)]["fitness"]:
metricx.append(item[1])
metricy.append(item[0])
metricx_list.append(metricx)
metricy_list.append(metricy)
#dse.plot(metricx_list, metricy_list)
cli_core_list=[[]]
QoS_list=[[]]
with open('./data_rd2.json') as data_file:
data = json.load(data_file)
for key in data.keys():
cli_core_list[0] = cli_core_list[0] + data[key]["cli_core"]
QoS_list[0] = QoS_list[0] + data[key]["QoS"]
import dse
dse.plot(cli_core_list+metricx_list, QoS_list+metricy_list)
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model to the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic absolute exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
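# Side note (a sketch, not part of the original example): the fitted model can
# also return point predictions together with an estimated MSE, e.g.
#   y_pred, mse = gp.predict(X, eval_MSE=True)
# using the GaussianProcess API available in this scikit-learn version.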
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module (n_jobs=1 keeps the folds on a single CPU)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
Nyker510/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
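# Note (illustrative variant, not in the original example): the weighting could be
# sharpened with an explicit scale and a small floor, e.g.
#   beta, eps = 10, 1e-6
#   graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# where a larger beta makes the cut depend more strongly on the gradient.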
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/indexes/interval/test_interval.py | 1 | 47391 | from __future__ import division
from itertools import permutations
import pytest
import numpy as np
import re
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, date_range, timedelta_range)
from pandas.compat import lzip
import pandas.core.common as com
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
@pytest.mark.parametrize('breaks', [
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(['20170101', '20170202', '20170303', '20170404']),
pd.to_timedelta(['1ns', '2ms', '3s', '4M', '5H', '6D'])])
def test_length(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.length
expected = Index(iv.length for iv in index)
tm.assert_index_equal(result, expected)
# with NA
index = index.insert(1, np.nan)
result = index.length
expected = Index(iv.length if notna(iv) else iv for iv in index)
tm.assert_index_equal(result, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex(index._ndarray_values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right')])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique is True
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique is True
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique is True
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique is True
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert idx.is_unique is False
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique is True
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
# TODO: check this behavior is consistent with test_interval_new.py
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [3, Interval(1, 4)])
def test_get_loc_length_one(self, item, closed):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_loc(item)
assert result == 0
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_get_loc_datetimelike_nonoverlapping(self, breaks):
# GH 20636
# nonoverlapping = IntervalIndex method and no i8 conversion
index = IntervalIndex.from_breaks(breaks)
value = index[0].mid
result = index.get_loc(value)
expected = 0
assert result == expected
interval = Interval(index[0].left, index[1].right)
result = index.get_loc(interval)
expected = slice(0, 2)
assert result == expected
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_loc_datetimelike_overlapping(self, arrays):
# GH 20636
# overlapping = IntervalTree method with i8 conversion
index = IntervalIndex.from_arrays(*arrays)
value = index[0].mid + Timedelta('12 hours')
result = np.sort(index.get_loc(value))
expected = np.array([0, 1], dtype='int64')
tm.assert_numpy_array_equal(result, expected)
interval = Interval(index[0].left, index[1].right)
result = np.sort(index.get_loc(interval))
expected = np.array([0, 1, 2], dtype='int64')
assert tm.assert_numpy_array_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [
[3], np.arange(1, 5), [Interval(1, 4)], interval_range(1, 4)])
def test_get_indexer_length_one(self, item, closed):
# GH 17284
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_indexer(item)
expected = np.array([0] * len(item), dtype='intp')
tm.assert_numpy_array_equal(result, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_reindexer_datetimelike(self, arrays):
# GH 20636
index = IntervalIndex.from_arrays(*arrays)
tuples = [(index[0].left, index[0].left + pd.Timedelta('12H')),
(index[-1].right - pd.Timedelta('12H'), index[-1].right)]
target = IntervalIndex.from_tuples(tuples)
result = index._get_reindexer(target)
expected = np.array([0, 3], dtype='int64')
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_maybe_convert_i8(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
# intervalindex
result = index._maybe_convert_i8(index)
expected = IntervalIndex.from_breaks(breaks.asi8)
tm.assert_index_equal(result, expected)
# interval
interval = Interval(breaks[0], breaks[1])
result = index._maybe_convert_i8(interval)
expected = Interval(breaks[0].value, breaks[1].value)
assert result == expected
# datetimelike index
result = index._maybe_convert_i8(breaks)
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
# datetimelike scalar
result = index._maybe_convert_i8(breaks[0])
expected = breaks[0].value
assert result == expected
# list-like of datetimelike scalars
result = index._maybe_convert_i8(list(breaks))
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
np.arange(5, dtype='int64'),
np.arange(5, dtype='float64')], ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_numeric(self, breaks, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
key = make_key(breaks)
# no conversion occurs for numeric
result = index._maybe_convert_i8(key)
assert result is key
@pytest.mark.parametrize('breaks1, breaks2', permutations([
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], 2), ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks1)
key = make_key(breaks2)
msg = ('Cannot index an IntervalIndex of subtype {dtype1} with '
'values of dtype {dtype2}')
msg = re.escape(msg.format(dtype1=breaks1.dtype, dtype2=breaks2.dtype))
with tm.assert_raises_regex(ValueError, msg):
index._maybe_convert_i8(key)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
# TODO: check this behavior is consistent with test_interval_new.py
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
# GH 19101: empty result, same dtype
index = IntervalIndex(np.array([], dtype='int64'), closed=closed)
result = index.union(index)
tm.assert_index_equal(result, index)
# GH 19101: empty result, different dtypes
other = IntervalIndex(np.array([], dtype='float64'), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
# GH 19101: empty result, same dtype
other = IntervalIndex.from_breaks(range(300, 314), closed=closed)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
breaks = np.arange(300, 314, dtype='float64')
other = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
# GH 19101: empty result, same dtype
result = index.difference(index)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(index.left.astype('float64'),
index.right, closed=closed)
result = index.difference(other)
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self, closed):
index = self.create_index(closed=closed)
result = index[1:].symmetric_difference(index[:-1])
expected = IntervalIndex([index[0], index[-1]])
tm.assert_index_equal(result, expected)
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(index.left.astype('float64'),
index.right, closed=closed)
result = index.symmetric_difference(other)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# non-IntervalIndex
msg = ('the other index needs to be an IntervalIndex too, but '
'was type Int64Index')
with tm.assert_raises_regex(TypeError, msg):
set_op(Index([1, 2, 3]))
# mixed closed
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
# GH 19016: incompatible dtypes
other = interval_range(Timestamp('20180101'), periods=9, closed=closed)
msg = ('can only do {op} between two IntervalIndex objects that have '
'compatible dtypes').format(op=op_name)
with tm.assert_raises_regex(TypeError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_datetime(self, tz):
start = Timestamp('2000-01-01', tz=tz)
dates = date_range(start=start, periods=10)
index = IntervalIndex.from_breaks(dates)
# test mid
start = Timestamp('2000-01-01T12:00', tz=tz)
expected = date_range(start=start, periods=9)
tm.assert_index_equal(index.mid, expected)
# __contains__ doesn't check individual points
assert Timestamp('2000-01-01', tz=tz) not in index
assert Timestamp('2000-01-01T12', tz=tz) not in index
assert Timestamp('2000-01-02', tz=tz) not in index
iv_true = Interval(Timestamp('2000-01-01T08', tz=tz),
Timestamp('2000-01-01T18', tz=tz))
iv_false = Interval(Timestamp('1999-12-31', tz=tz),
Timestamp('2000-01-01', tz=tz))
assert iv_true in index
assert iv_false not in index
# .contains does check individual points
assert not index.contains(Timestamp('2000-01-01', tz=tz))
assert index.contains(Timestamp('2000-01-01T12', tz=tz))
assert index.contains(Timestamp('2000-01-02', tz=tz))
assert index.contains(iv_true)
assert not index.contains(iv_false)
# test get_indexer
start = Timestamp('1999-12-31T12:00', tz=tz)
target = date_range(start=start, periods=7, freq='12H')
actual = index.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
start = Timestamp('2000-01-08T18:00', tz=tz)
target = date_range(start=start, periods=7, freq='6H')
actual = index.get_indexer(target)
expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)),
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)),
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10))])
def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
expected = Index(com.asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)) + [np.nan],
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)) + [np.nan],
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10)) + [np.nan]])
@pytest.mark.parametrize('na_tuple', [True, False])
def test_to_tuples_na(self, tuples, na_tuple):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
expected_notna = Index(com.asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
# check the NA portion
result_na = result[-1]
if na_tuple:
assert isinstance(result_na, tuple)
assert len(result_na) == 2
assert all(isna(x) for x in result_na)
else:
assert isna(result_na)
def test_nbytes(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
result = IntervalIndex.from_arrays(left, right).nbytes
expected = 64 # 4 * 8 * 2
assert result == expected
def test_itemsize(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
expected = 16 # 8 * 2
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = IntervalIndex.from_arrays(left, right).itemsize
assert result == expected
@pytest.mark.parametrize('new_closed', [
'left', 'right', 'both', 'neither'])
def test_set_closed(self, name, closed, new_closed):
# GH 21670
index = interval_range(0, 5, closed=closed, name=name)
result = index.set_closed(new_closed)
expected = interval_range(0, 5, closed=new_closed, name=name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('bad_closed', ['foo', 10, 'LEFT', True, False])
def test_set_closed_errors(self, bad_closed):
# GH 21670
index = interval_range(0, 5)
msg = "invalid option for 'closed': {closed}".format(closed=bad_closed)
with tm.assert_raises_regex(ValueError, msg):
index.set_closed(bad_closed)
| bsd-3-clause |
yuliang419/K2tools | ecentroid.py | 1 | 3828 | """
Plots any selected star(s) to use as reference star(s) for detrending.
Writes good frame numbers into file named ref_cad.dat, and centroids into ref_centroid.dat (for one chosen star only,
but we can probably average a few).
"""
from pixel2flux import *
import matplotlib.pyplot as plt
plt.ion()
import numpy as np
import sys
from collections import Counter
epics = np.loadtxt('guide_stars.txt', dtype=str)
field = sys.argv[1]
write = True
master_mask = []
master_x = []
master_y = []
for epic in epics:
print epic
targ = PixelTarget(epic, field, 'l')
targ.read_fits(clean=False)
print len(targ)
if targ.kmag > 15:
print 'Reference star too faint'
continue
elif targ.kmag <= 10:
continue
aperture = targ.find_aper()
# fig = plt.figure(figsize=(8,8))
# ax = fig.add_subplot(111)
# draw_aper(targ, aperture.labels, ax)
# plt.savefig('outputs/'+epic+'_aper.png', dpi=150)
ftot = targ.aper_phot(aperture)
goodcads = targ.find_thrust(printtimes=False)
print 'No thruster fire:', len(goodcads)
master_mask.append(goodcads)
master_x.append(targ.data['x'])
master_y.append(targ.data['y'])
ref_x = np.nanmedian(master_x, axis=0) # for entire light curve, no points removed
ref_y = np.nanmedian(master_y, axis=0)
master_mask = np.array(master_mask)
cnt = Counter(np.hstack(master_mask))
refcad = [k for k, v in cnt.iteritems() if v > 1] # point is good if it's good in at least two targets
print 'no. of good points=', len(refcad)
# fig = plt.figure(figsize=(10,4))
# targ = PixelTarget(epics[1], field, 'l')
# targ.read_fits(clean=True)
# labels = targ.find_aper()
# ftot = targ.aper_phot(labels)
# plt.plot(targ.data['jd'], ftot, 'b.')
# # plt.plot(targ.data['jd'][~thruster_mask], ftot[~thruster_mask], 'ro')
# plt.show()
for epic in epics:
print 'Plotting ', epic
targ = PixelTarget(epic, field, 'l')
targ.read_fits(clean=False)
aperture = targ.find_aper()
ftot = targ.aper_phot(aperture)
bad = [i for i in targ.data['cadence'] if i not in refcad]
fig, ax = plt.subplots(2, 2, figsize=(15,12))
ax[0, 0].plot(targ.data['jd'], ftot, lw=0, marker='.')
ax[0, 0].plot(targ.data['jd'][bad], ftot[bad], 'ro')
ax[0, 0].set_xlabel('t')
ax[0, 0].set_ylabel('Flux')
colors = ['r', 'y', 'g', 'c', 'b', 'm']
inds = np.linspace(0, len(targ.data['jd']), 7)
for i in range(len(inds) - 1):
start = int(inds[i])
end = int(inds[i + 1])
ax[0, 1].plot(targ.data['x'][start:end], targ.data['y'][start:end], marker='.', lw=0, color=colors[i])
ax[0, 1].set_xlabel('x')
ax[0, 1].set_ylabel('y')
# ax[0, 1].plot(targ.data['x'][outlier], targ.data['y'][outlier], lw=0, marker='o', color='g')
ax[0, 1].plot(targ.data['x'][bad], targ.data['y'][bad], 'ro')
ax[1, 0].plot(targ.data['jd'], targ.data['x'], lw=0, marker='.')
ax[1, 0].set_xlabel('t')
ax[1, 0].set_ylabel('x')
for time in targ.data['jd'][bad]:
plt.axvline(x=time, color='r')
ax[1, 1].plot(targ.data['jd'], targ.data['y'], lw=0, marker='.')
ax[1, 1].set_xlabel('t')
ax[1, 1].set_ylabel('y')
plt.show()
if write:
np.savetxt('ref_cad.dat', np.transpose(refcad), fmt='%d')
targ.remove_thrust(refcad)
ref_x = ref_x[refcad]
ref_y = ref_y[refcad]
fig = plt.figure()
plt.plot(ref_x, ref_y, 'b.')
plt.show()
if write:
outfile = open('ref_centroid_full.dat', 'w')
print>> outfile, '# cadence x y seg'
print len(targ.data['cadence']), len(ref_x)
seg = 0
for i in range(0, len(targ.data['jd'])):
if (targ.data['cadence'][i] > 0) and (targ.data['cadence'][i] - targ.data['cadence'][i - 1] > 1):
seg += 1
print>> outfile, targ.data['cadence'][i], ref_x[i], ref_y[i], seg, targ.data['jd'][i]
outfile.close()
| mit |
olologin/scikit-learn | examples/classification/plot_classifier_comparison.py | 36 | 5123 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
idlead/scikit-learn | examples/linear_model/plot_ard.py | 18 | 2827 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
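Concretely, the ARD prior places an independent zero-mean Gaussian on each
weight, w_i ~ N(0, lambda_i^{-1}), with one precision lambda_i learned per
feature; features whose estimated precision grows large end up with weights
shrunk towards zero.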
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
pylayers/pylayers | pylayers/simul/link.py | 1 | 78614 | #
# -*- coding: utf-8 -*-
#
from __future__ import print_function
r"""
.. currentmodule:: pylayers.simul.link
.. autosummary::
:members:
"""
try:
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
import doctest
import time
import numpy as np
import matplotlib.pylab as plt
import pylayers.signal.waveform as wvf
import pylayers.util.geomutil as geu
from pylayers.util.project import *
import pylayers.util.pyutil as pyu
from pylayers.simul.radionode import RadioNode
# Handle Layout
from pylayers.gis.layout import Layout
# Handle Antenna
from pylayers.antprop.antenna import Antenna
# Handle Signature
from pylayers.antprop.signature import Signatures,Signature
# Handle Rays
from pylayers.antprop.rays import Rays
# Handle VectChannel and ScalChannel
from pylayers.antprop.channel import Ctilde, Tchannel , AFPchannel
from pylayers.antprop.statModel import getchannel
import tqdm
import copy
import h5py
import pdb
class Link(PyLayers):
""" Link class
Members
-------
"""
def __init__(self):
""" Link evaluation metaclass
"""
PyLayers.__init__(self)
def __add__(self,l):
""" merge ak and tauk of 2 Links
"""
L = Link()
tk = np.hstack((self.H.tk,l.H.tk))
ak = np.hstack((self.H.ak,l.H.ak))
us = np.argsort(tk)
L.H.ak = ak[us]
L.H.tk = tk[us]
return L
class SLink(Link):
def __init__(self):
""" Statistical Link evaluation class
"""
super(SLink, self).__init__()
def onbody(self, B, dida, didb, a, b):
""" Statistical evaluation of a on-body link
Parameters
----------
B : Body
Body object on which devices are held
dida: int
device a id number on body
didb: int
device b id number on body
a : nd array
position of device a
b : nd array
position of device b
Returns
-------
(ak, tk, eng )
ak : ndarray
alpha_k
tk : ndarray
tau_k
See Also
--------
pylayers.mobility.ban.body
"""
# inter to be replaced by engagement
eng = B.intersectBody3(a, b, topos=True)
empa = B.dev[dida]['cyl']
empb = B.dev[didb]['cyl']
emp = empa
if empa == 'trunkb':
emp = empb
if emp == 'forearml':
emp = 'forearmr'
self.H.ak, self.H.tk = getchannel(emplacement=emp, intersection=eng)
self.eng = eng
return self.H.ak, self.H.tk, self.eng
class DLink(Link):
""" Deterministic Link Class
Attributes
----------
L : Layout
Layout to be used
Aa : Antenna
Antenna of device dev_a
Ab : Antenna
Antenna of device dev_b
a : np.ndarray (3,)
position of a device dev_a
b : np.ndarray (3,)
position of a device dev_b
ca : int
cycle a number
cb : int
cycle b number
Ta : np.ndarray (3,3)
Rotation matrix of Antenna of device dev_a relative to global Layout scene
Tb : np.ndarray (3,3)
Rotation matrix of Antenna of device dev_b relative to global Layout scene
fGHz : np.ndarray (Nf,)
frequency range of Nf points used for evaluation of channel
wav : Waveform
Waveform to be applied on the channel
save_idx : int
number to identify the h5 file generated
"""
def __init__(self, **kwargs):
""" deterministic link evaluation
Advanced (change only if you really know what you are doing!)
save_opt : list (['sig','ray','Ct','H'])
information to be saved in the Links h5 file. Should never be modified!
force_create : Boolean (False)
force creating the h5py file (if it already exists, it will be erased)
Notes
-----
All simulations are stored into a unique file in your <PyProject>/output directory
using the following convention:
Links_<save_idx>_<LayoutFilename>.h5
where
<save_idx> is an integer number to distinguish different links simulations
and <LayoutFilename> is the Layout used for the link simulation.
Dataset organisation:
Links_<idx>_<Layout_name>.h5
|
|/sig/si_ID#0/
| /si_ID#1/
| ...
|
|/ray/ray_ID#0/
| /ray_ID#1/
| ...
|
|/Ct/Ct_ID#0/
| /Ct_ID#1/
| ...
|
|/H/H_ID#0/
| /H_ID#1/
| ...
|
|
|p_map
|c_map
|f_map
|A_map
|T_map
Roots Dataset :
c_map : Cycles (Nc x 3)
p_map : Positions (Np x 3)
f_map : Frequency (Nf x 3)
T_map : Rotation matrices (Nt x 3)
A_map : Antenna name (Na x 3)
Groups and subgroups:
Signature identifier (si_ID#N):
ca_cb_cutoff_th
Ray identifier (ray_ID#N):
cutoff_th_ua_ub
Ctilde identifier (Ct_ID#N):
ua_ub_uf
H identifier (H_ID#N):
ua_ub_uf_uTa_uTb_uAa_uAb
with
ca : cycle number of a
cb : cycle number of b
cutoff : signature.run cutoff
th : signature.run threshold * 100
ua : index of position a in 'p_map' position dataset
ub : index of position b in 'p_map' position dataset
uf : index of the frequency range in 'f_map' frequency dataset
uTa : index of rotation Ta in 'T_map' rotation dataset
uTb : index of rotation Tb in 'T_map' rotation dataset
uAa : index of antenna Aa in 'A_map' antenna name dataset
uAb : index of antenna Ab in 'A_map' antenna name dataset
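For instance (illustrative values), the signatures of a link between cycles
1 and 2 computed with cutoff=3 and threshold=0.8 would be stored under the
group name '1_2_3_80', the threshold being saved as an integer percentage
(see get_grpname).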
Examples
--------
>>> from pylayers.simul.link import *
>>> DL = DLink(L=Layout('DLR.lay'))
>>> DL.eval()
Notes
-----
When modifying the coordinates of the link it is important to assign a new
array rather than modify one of its components in place; in the latter case
the cycle will not be updated.
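e.g. (illustrative values, the coordinates must lie inside the Layout):
DL.a = np.array([2.0, 2.0, 1.2])   # ok : the setter runs and cycle ca is updated
DL.a[2] = 1.5                      # not ok : bypasses the setter, ca is left unchanged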
"""
Link.__init__(self)
logger.name = __name__
defaults={ 'L': '',
'a': np.array(()),
'b': np.array(()),
'Aa': [],
'Ab': [],
'Ta': np.eye(3),
'Tb': np.eye(3),
'fGHz': np.array([2.4]),
'wav': wvf.Waveform(),
'cutoff': 3,
'threshold': 0.8,
'delay_excess_max_ns':500,
'save_opt': ['sig','ray2','ray','Ct','H'],
'save_idx':0,
'force_create':False,
'seed':0,
'graph':'tcvirw'
}
# self._ca = -1
# self._cb = -1
specset = ['a','b','Aa','Ab','Ta','Tb','cutoff','L','fGHz','wav']
# set default attribute
for key, value in defaults.items():
if key not in kwargs:
if key in specset :
setattr(self,'_'+key,value)
else :
setattr(self,key,value)
else :
if key in specset :
setattr(self,'_'+key,kwargs[key])
else :
setattr(self,key,kwargs[key])
# if self._L == '':
# raise AttributeError('Please specify a Layout')
force = self.force_create
delattr(self,'force_create')
# dictionnary data exists
self.dexist={'sig':{'exist':False,'grpname':''},
'ray':{'exist':False,'grpname':''},
'ray2':{'exist':False,'grpname':''},
'Ct':{'exist':False,'grpname':''},
'H':{'exist':False,'grpname':''}
}
# The link frequency range depends on the antenna
# self.fGHz = kwargs['fGHz']
if self.Aa == []:
self.Aa = Antenna(typ='Omni',fGHz=self.fGHz)
if self.Ab == []:
self.Ab = Antenna(typ='Omni',fGHz=self.fGHz)
if isinstance(self._L,str):
self._Lname = self._L
self._L = Layout(self._Lname,bgraphs=True,bcheck=False)
else:
self._Lname = self._L._filename
if self._Lname != '':
self.filename = 'Links_' + str(self.save_idx) + '_' + self._Lname + '.h5'
filenameh5 = pyu.getlong(self.filename,pstruc['DIRLNK'])
# check if save file already exists
if not os.path.exists(filenameh5) or force:
print('Links save file for ' + self.L._filename + ' does not exist.')
print('Creating file. You\'ll see this message only once per Layout')
self.save_init(filenameh5)
try:
self.L.dumpr()
print('Layout Graph loaded')
except:
print('This is the first time the Layout is used. Graphs have to be built. Please Wait')
self.L.build(graph=self.graph)
self.L.dumpw()
#
# In outdoor situation we delete transmission node involving
# an indoor cycle at the exception of AIR
#
cindoor = [p for p in self.L.Gt.nodes() if self.L.Gt.node[p]['indoor']]
if self._L.typ =='outdoor':
u = self.L.Gi.node.keys()
# lT : list of transmission interactions
lT = [k for k in u if (len(k)==3)]
# lTi : transmission connected at least to an indoor cycle
lTi = [ k for k in lT if ((k[1] in cindoor) or (k[2] in cindoor))]
# lTiw : those which are walls (not those above buildings)
lTiw = [ k for k in lTi if self.L.Gs.node[k[0]]['name']!='AIR' ]
self.L.Gi.remove_nodes_from(lTiw)
lE = list(self.L.Gi.edges())
for k in range(len(lE)):
e = lE[k]
try:
output = self.L.Gi[e[0]][e[1]]['output']
except:
pdb.set_trace()
tbd = []
for l in output.keys():
if l in lTiw:
tbd.append(l)
for d in tbd :
del output[d]
self.L.Gi[e[0]][e[1]]['output']=output
#self.L.dumpw()
#self.L.build()
self.init_positions()
###########
# init freq
# TODO Check where it is used redundant with fGHz
###########
#self.fmin = self.fGHz[0]
#self.fmax = self.fGHz[-1]
#self.fstep = self.fGHz[1]-self.fGHz[0]
self.Si = Signatures(self.L,self.ca,self.cb,cutoff=self.cutoff)
self.R = Rays(self.a,self.b)
self.C = Ctilde()
self.H = Tchannel()
@property
def Lname(self):
return self._Lname
@property
def L(self):
return self._L
@property
def a(self):
return self._a
@property
def b(self):
return self._b
@property
def ca(self):
return self._ca
@property
def cb(self):
return self._cb
@property
def Aa(self):
return self._Aa
@property
def Ab(self):
return self._Ab
@property
def Ta(self):
return self._Ta
@property
def Tb(self):
return self._Tb
@property
def fGHz(self):
return self._fGHz
@property
def cutoff(self):
return self._cutoff
@property
def delay_excess_max_ns(self):
return self._delay_excess_max_ns
@property
def threshold(self):
return self._threshold
@property
def wav(self):
return self._wav
@L.setter
def L(self,L):
# change layout and build/load
plotfig=False
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
mlab.clf()
plotfig=True
if isinstance(L,str):
self._L = Layout(L,bgraphs=False,bcheck=False)
self._Lname = L
elif isinstance(L,Layout):
self._L = L
self._Lname = L._filename
self.reset_config()
if plotfig:
self._show3()
@Lname.setter
def Lname(self,Lname):
# change layout and build/load
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
mlab.clf()
self._L = Layout(Lname)
self._Lname = Lname
self.reset_config()
@a.setter
def a(self,position):
if not self.L.ptin(position):
# limit position in the visible region L.ax
#if position[0] < self.L.ax[0]:
#position[0] = self.L.ax[0]
# if position[0] > self.L.ax[1]:
# position[0] = self.L.ax[1]
# if position[1] < self.L.ax[2]:
# position[1] = self.L.ax[2]
# if position[1] > self.L.ax[3]:
# position[1] = self.L.ax[3]
raise NameError ('Warning : point a is not inside the Layout')
# raise NameError ('Warning : point a is not inside the Layout')
if not self.L.pt2cy(position) == self.ca:
self.ca = self.L.pt2cy(position)
self._a = position
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
self._update_show3(ant='a',delrays=True)
if hasattr(self,'ca') and hasattr(self,'cb'):
self._autocufoff()
self.checkh5()
@b.setter
def b(self,position):
if not self.L.ptin(position):
# if position[0]<self.L.ax[0]:
# position[0]=self.L.ax[0]
# if position[0]>self.L.ax[1]:
# position[0]=self.L.ax[1]
# if position[1]<self.L.ax[2]:
# position[1]=self.L.ax[2]
# if position[1]>self.L.ax[3]:
# position[1]=self.L.ax[3]
raise NameError ('Warning : point b is not inside the Layout')
if not self.L.pt2cy(position) == self.cb:
self.cb = self.L.pt2cy(position)
self._b = position
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
self._update_show3(ant='b',delrays=True)
if hasattr(self,'ca') and hasattr(self,'cb'):
self._autocufoff()
self.checkh5()
@ca.setter
def ca(self,cycle):
if not cycle in self.L.Gt.nodes():
raise NameError ('cycle ca is not inside Gt')
self._ca = cycle
self.a = self.L.cy2pt(cycle)
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@cb.setter
def cb(self,cycle):
if not cycle in self.L.Gt.nodes():
raise NameError ('cycle cb is not inside Gt')
self._cb = cycle
self.b = self.L.cy2pt(cycle)
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@Aa.setter
def Aa(self,Ant):
if hasattr(self.Aa,'_mayamesh'):
self.Aa._mayamesh.remove()
# save rot
rot = self.Ta
self._Aa = Ant
self.Ta = rot
if Ant.fromfile:
self.fGHz = Ant.fGHz
print("Warning : frequency range modified by antenna Aa")
else:
self._Aa.fGHz = self.fGHz
# self.initfreq()
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
self._update_show3(ant='a')
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@Ab.setter
def Ab(self,Ant):
if hasattr(self.Ab,'_mayamesh'):
self.Ab._mayamesh.remove()
#save rot
rot = self.Tb
self._Ab = Ant
self.Tb = rot
if Ant.fromfile:
self.fGHz = Ant.fGHz
print("Warning : frequency range modified by antenna Ab")
else:
self._Ab.fGHz = self.fGHz
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
self._update_show3(ant='b')
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@Ta.setter
def Ta(self,orientation):
self._Ta = orientation
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
self._update_show3(ant='a')
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@Tb.setter
def Tb(self,orientation):
self._Tb = orientation
if hasattr(self,'_maya_fig') and self._maya_fig._is_running:
self._update_show3(ant='b')
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
# if self.dexist['Ct']['exist']:
# self.C.locbas(Ta=self.Ta, Tb=self.Tb)
# #T channel
# self.H = self.C.prop2tran(a=self.Aa,b=self.Ab,Friis=True)
@cutoff.setter
def cutoff(self,cutoff):
co = max(cutoff,1)
self._cutoff=co
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@threshold.setter
def threshold(self,threshold):
th = min(threshold,1.)
th = max(th,0.)
self._threshold= th
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@delay_excess_max_ns.setter
def delay_excess_max_ns(self,delay_excess_max_ns):
delay_excess_max_ns = max(delay_excess_max_ns,0.)
self._delay_excess_max_ns = delay_excess_max_ns
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
@fGHz.setter
def fGHz(self,freq):
if not isinstance(freq,np.ndarray):
freq=np.array([freq])
diff_freq_a = (self.Aa.fGHz!=freq)
diff_freq_b = (self.Ab.fGHz!=freq)
if isinstance(diff_freq_a,bool):
cond_a = diff_freq_a
else:
cond_a = diff_freq_a.all()
if isinstance(diff_freq_b,bool):
cond_b = diff_freq_b
else:
cond_b = diff_freq_b.all()
if (self.Aa.fromfile) & cond_a:
print(" Antenna Aa frequency range is fixed, you cannot change frequency")
elif (self.Ab.fromfile) & cond_b:
print(" Antenna Ab frequency range is fixed,you cannot change frequency")
else:
self._fGHz = freq
self.Aa.fGHz=self.fGHz
self.Ab.fGHz=self.fGHz
if hasattr(self,'ca') and hasattr(self,'cb'):
self.checkh5()
# if self.Aa.typ == 'Omni':
# self.Aa.fGHz = self.fGHz
# if self.Ab.typ == 'Omni':
# self.Ab.fGHz = self.fGHz
#if len(freq)>1:
# self.fmin = freq[0]
# self.fmax = freq[-1]
# self.fstep = freq[1]-freq[0]
#else:
# self.fmin = freq
# self.fmax = freq
# self.step = 0
@wav.setter
def wav(self,waveform):
self._wav = waveform
if 'H' in dir(self):
if len(self.H.taud[0])!=0:
self.chanreal = self.H.get_cir(self.wav.sfg)
def __repr__(self):
if hasattr(self,'filename'):
s = 'filename: ' + self.filename +'\n'
#s = s + 'Layout file: ' + self.Lname + '\n'
s = s + 'Node A \n'
s = s + '------ \n'
s = s + '\tcoord : ' + str (self.a) + " in cycle " + str(self.ca) + '\n'
s = s + '\tantenna type: ' + str (self.Aa.typ) + '\n'
if (self.Ta!=np.eye(3)).any():
s = s + '\trotation matrix : \n ' + str (self.Ta) + '\n\n'
s = s + 'Node B \n'
s = s + '------ \n'
s = s + '\tcoord : ' + str (self.b) + " in cycle " + str(self.cb) + '\n'
s = s + '\tantenna : ' + str (self.Ab.typ) + '\n'
if (self.Tb!=np.eye(3)).any():
s = s + 'rotation matrix : \n ' + str (self.Tb) + '\n\n'
s = s + '---------------- \n'
s = s + 'distance AB : ' + str("%6.3f" % np.sqrt(np.sum((self.a-self.b)**2))) + ' m \n'
s = s + 'delay AB : ' + str("%6.3f" % (np.sqrt(np.sum((self.a-self.b)**2))/0.3)) + ' ns\n'
rd2deg = 180/np.pi
if not np.allclose(self.a,self.b):
vsba = self.b-self.a
a1 = geu.angledir(vsba[None,:])
a2 = geu.angledir(-vsba[None,:])
s = s + 'azimuth (A | B) : %.2f ' % (a1[0,1]*rd2deg) +' deg | %.2f' % (a2[0,1]*rd2deg) + ' deg\n'
s = s + 'elevation (A | B) : %.2f' % (a1[0,0]*rd2deg) + ' deg | %.2f ' % (a2[0,0]*rd2deg) + ' deg\n'
s = s + 'tilt (A | B) : '+str((a1[0,0]-np.pi/2)*rd2deg)+ ' deg | '+ str((a2[0,0]-np.pi/2)*rd2deg)+ ' deg\n'
#s = s + 'Frequency range : \n'
s = s + '------------- \n'
Nf = len(self.fGHz)
s = s + 'fGHz : %.2f, %.2f, %g ' %(self.fGHz[0],self.fGHz[-1],Nf) +'\n'
if Nf>1:
s = s + 'fstep (GHz) : ' + str(self.fGHz[1]-self.fGHz[0]) +'\n'
d = np.sqrt(np.sum((self.a-self.b)**2))
if Nf>1:
fcGHz = (self.fGHz[-1]+self.fGHz[0])/2.
else:
fcGHz = self.fGHz[0]
L = 32.4+20*np.log10(d)+20*np.log10(fcGHz)
s = s + '------------- \n'
s = s + 'cutoff/threshold : %g / %.2f' %(self.cutoff, self.threshold)+'\n'
s = s + 'max delay /dist: %.2f ns / %.2f m' %(self.delay_excess_max_ns,self.delay_excess_max_ns*0.3)+'\n'
s = s + '-------------- \n'
if hasattr(self,'Si'):
s = s + '# Si : ' + str(len(self.Si))
if hasattr(self,'r2d'):
s = s + '\n# r2d : ' + str(len(self.r2d))
if hasattr(self,'R'):
s = s + '\n# R : ' + str(len(self.R))
if hasattr(self,'C'):
s = s + '\n# C.Ctt.y : ' + str(self.C.Ctt.y.shape)
s = s + '\n# C.Ctp.y : ' + str(self.C.Ctp.y.shape)
s = s + '\n# C.Cpt.y : ' + str(self.C.Cpt.y.shape)
s = s + '\n# C.Cpp.y : ' + str(self.C.Cpp.y.shape)
else:
s = 'No Layout specified'
return s
def inforay(self,iray):
""" provide full information about a specified ray
Parameters
----------
iray : int
ray index
"""
print("Ray : "+str(iray))
if not self.R.evaluated:
self.R.eval()
PM = self.R.info(iray,ifGHz=0,matrix=1)
print("Propagation Channel 2x2 (C):")
self.C.inforay(iray)
if self.C.islocal:
# back to global frame
self.C.locbas()
dist = self.C.tauk[iray]*0.3
C = dist*np.array([[self.C.Ctt.y[iray,0],self.C.Ctp.y[iray,0]],
[self.C.Cpt.y[iray,0],self.C.Cpt.y[iray,0]]] )
b = np.allclose(PM,C)
self.C.locbas()
# def initfreq(self):
# """ Automatic freq determination from
# Antennas
# """
# #sf = self.fGHz[1]-self.fGHz[0]
# sf = 1e15
# if hasattr(self.Aa,'fGHz'):
# fa = self.Aa.fGHz
# if len(fa)==0:
# fa = np.array([2.4])
# self.Aa.fGHz = fa
# # raise AttributeError("Incompatible frequency range in Antenna. Consider change Dlink.fGHz")
# print "Incompatible frequency range in Antenna. WARNING Dlink.fGHz changed to 2.4GHz"
# try:
# sa = fa[1]-fa[0] # step frequecy
# except: #single frequency
# sa = fa[0]
# # step
# if len(self.fGHz)>0:
# minfa = max(min(fa),min(self.fGHz))
# maxfa = min(max(fa),max(self.fGHz))
# else:
# minfa = min(fa)
# maxfa = max(fa)
# sf = min(sa,sf)
# self.fGHz = np.arange(minfa,maxfa+sf,sf)
# elif hasattr(self.Ab,'fGHz'):
# fb = self.Ab.fGHz
# if len(fb)==0:
# # raise AttributeError("Incompatible frequency range in Antenna. Consider change Dlink.fGHz")
# fb = np.array([2.4])
# self.Ab.fGHz=fb
# # raise AttributeError("Incompatible frequency range in Antenna. Consider change Dlink.fGHz")
# print "Incompatible frequency range in Antenna. WARNING Dlink.fGHz changed to 2.4GHz"
# try:
# sb = fb[1]-fb[0] # step frequency
# except:
# sb = fb[0]
# if len(self.fGHz)>0:
# minfb = max(min(self.fGHz),min(fb))
# maxfb = min(max(self.fGHz),max(fb))
# else:
# minfb = min(fb)
# maxfb = max(fb)
# sf = min(sf,sb)
# self.fGHz = np.arange(minfb,maxfb+sf,sf)
# else:
# self.fGHz = np.array([2.3,2.4,2.5])
def init_positions(self,force=False):
""" initialize random positions for a link
Parameters
----------
force : boolean
"""
###########
# init pos & cycles
#
# If a and b are not specified
# they are chosen as center of gravity of cycle 0
#
###########
nodes = self.L.Gt.nodes()
#
# pick the point outside building if Layout.indoor not activated
#
if self.L.typ=='outdoor':
nodes = [n for n in nodes if n!=0 and not self.L.Gt.node[n]['indoor']]
elif self.L.typ=='indoor':
nodes = [n for n in nodes if n!=0 and self.L.Gt.node[n]['indoor']]
else:
nodes = [n for n in nodes if n!=0 ]
# draw the link extremities randomly
np.random.seed(self.seed)
ia = np.random.randint(0,len(nodes))
ib = np.random.randint(0,len(nodes))
if len(self.a)==0 or force:
self.ca = nodes[ia]
else:
if len(self.a) ==2:
a=np.r_[self.a,1.0]
else:
a=self.a
self.ca = self.L.pt2cy(a)
self.a = a
if len(self.b)==0 or force:
self.cb = nodes[ib]
else:
if len(self.b) ==2:
b=np.r_[self.b,1.0]
else:
b=self.b
self.cb = self.L.pt2cy(b)
self.b = b
def reset_config(self):
""" reset configuration when a new layout is loaded
"""
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
# self.a = self.L.cy2pt(self.ca)
# self.b = self.L.cy2pt(self.cb)
# change h5py file if layout changed
self.filename = 'Links_' + str(self.save_idx) + '_' + self._Lname + '.h5'
filenameh5 = pyu.getlong(self.filename,pstruc['DIRLNK'])
if not os.path.exists(filenameh5) :
print('Links save file for ' + self.L._filename + ' does not exist.')
print('It is being created. You\'ll see that message only once per Layout')
self.save_init(filenameh5)
self.ca = 1
self.cb = 1
self.init_positions(force=True)
try:
delattr(self,'Si')
except:
pass
try:
delattr(self,'R')
except:
pass
try:
delattr(self,'C')
except:
pass
try:
delattr(self,'H')
except:
pass
def checkh5(self):
""" check existence of previous simulations run with the same parameters.
Returns
-------
update self.dexist dictionary
"""
filenameh5 = pyu.getlong(self.filename,'output')
# get identifier group name in h5py file
if os.path.exists(filenameh5):
self.get_grpname()
# check if group name exists in the h5py file
[self.check_grpname(k,self.dexist[k]['grpname']) for k in self.save_opt]
def save_init(self,filename_long):
""" initialize the hdf5 file for link saving
Parameters
----------
filename_long : str
complete path and filename
'sig' : Signatures
'ray2' : 2D rays
'ray' : 3D rays
'Ct' : Propagation channel
'H' : Transmission channel
'p_map' : points
'c_map' : cycles
'f_map' : frequency
'A_map' : antennas
'T_map' : rotation
"""
f=h5py.File(filename_long,'w')
# try/except to avoid loosing the h5 file if
# read/write error
try:
f.create_group('sig')
f.create_group('ray2')
f.create_group('ray')
f.create_group('Ct')
f.create_group('H')
# mapping point a
f.create_dataset('p_map',shape=(0,3), maxshape=(None,3),dtype='float64')
# mapping cycles
f.create_dataset('c_map',shape=(0,3), maxshape=(None,3),dtype='int')
# mapping (fmin,fmax,fstep)
f.create_dataset('f_map',shape=(0,3), maxshape=(None,3),dtype='float64')
# mapping Antenna name
f.create_dataset('A_map',shape=(0,1), maxshape=(None,1),dtype="S10")
# mapping rotation matrices Antenna
f.create_dataset('T_map',shape=(0,3,3), maxshape=(None,3,3),dtype='float64')
f.close()
except:
f.close()
raise NameError('Links: issue when initializing h5py file')
def stack(self,key,array):
""" stack new array in h5py file
for a given key (dataframe/group)
Parameters
----------
key : string
array : np.ndarray
Returns
-------
idx : int
index of the last element of the array for key
"""
try :
lfilename=pyu.getlong(self.filename,pstruc['DIRLNK'])
f=h5py.File(lfilename,'a')
if type(array)==str:
array = array.encode('utf-8')
if key != 'T_map':
sc = f[key].shape
f[key].resize((sc[0]+1,sc[1]))
f[key][-1,:] = array
else:
sc = f[key].shape
f[key].resize((sc[0]+1,sc[1],sc[2]))
f[key][-1,:,:] = array
f.close()
return np.array([sc[0]])
except:
f.close()
raise NameError('Link stack: issue during stacking')
def _delete(self,key,grpname):
""" Delete a key and associated data into h5py file
Parameters
----------
key : string
key of the h5py file
grpname : string
group name of the h5py file
"""
lfilename = pyu.getlong(self.filename,pstruc['DIRLNK'])
f = h5py.File(lfilename,'a')
# try/except to avoid loosing the h5 file if
# read/write error
try:
del f[key][grpname]
# print 'delete ',key , ' in ', grpname
f.close()
except:
f.close()
raise NameError('Link._delete: issue when deleting in h5py file')
def save(self,obj,key,grpname,force=False):
""" Save a given object in the correct group
Parameters
----------
obj : Object
(Signatures|Rays|Ctilde|Tchannel)
key : string
key of the h5py file
grpname : string
group name of the h5py file
force : boolean or list
"""
if not force :
obj._saveh5(self.filename,grpname)
# if save is forced, previous existing data are removed and
# replaced by new ones.
else :
if self.dexist[key]['exist']:
self._delete(key,grpname)
#if type(grpname)==str:
# grpname.encode('utf-8')
#print(key,grpname)
#if key=='ray':
# pdb.set_trace()
obj._saveh5(self.filename,grpname)
logger.debug(str(obj.__class__).split('.')[-1] + ' from '+ grpname + ' saved')
def load(self,obj,grpname,**kwargs):
""" Load a given object in the correct grp
Parameters
----------
obj : Object
(Signatures|Rays|Ctilde|Tchannel)
grpname : string
group name of the h5py file
kwargs :
layout for sig and rays
"""
obj._loadh5(self.filename,grpname,**kwargs)
logger.debug(str(obj.__class__).split('.')[-1] + ' from '+ grpname + ' loaded')
def get_grpname(self):
""" Determine the data group name for the given configuration
Notes
-----
Update the key grpname of self.dexist[key] dictionary,
where key = 'sig'|'ray'|'Ct'|'H'
"""
############
# Signatures
############
array = np.array(([self.ca,self.cb,self.cutoff]))
ua_opt, ua = self.get_idx('c_map',array)
th = str(int(np.round(self.threshold,decimals=2)*100))
grpname = str(self.ca) + '_' +str(self.cb) + '_' + str(self.cutoff) + '_' + th
self.dexist['sig']['grpname']=grpname
############
# Rays
#############
# check existence of self.a in h5py file
ua_opt, ua = self.get_idx('p_map',self.a)
# check existence of self.b in h5py file
ub_opt, ub = self.get_idx('p_map',self.b)
# Write in h5py if no prior a-b link
grpname = str(self.cutoff) + '_' + th + '_' + str(ua) + '_' +str(ub)
self.dexist['ray2']['grpname'] = grpname
self.dexist['ray']['grpname'] = grpname
############
# Ctilde
#############
# check existence of frequency in h5py file
#farray = np.array(([self.fmin,self.fmax,self.fstep]))
Nf = len(self.fGHz)
if Nf > 1:
farray = np.array(([self.fGHz[0], self.fGHz[-1], self.fGHz[1]-self.fGHz[0]]))
else:
farray = np.array(([self.fGHz[0], self.fGHz[-1],0]))
uf_opt, uf = self.get_idx('f_map', farray)
grpname = str(ua) + '_' + str(ub) + '_' + str(uf)
self.dexist['Ct']['grpname'] = grpname
############
# H
#############
# check existence of Rot a (Ta) in h5py file
uTa_opt, uTa = self.get_idx('T_map',self.Ta)
# check existence of Rot b (Tb) in h5py file
uTb_opt, uTb = self.get_idx('T_map',self.Tb)
# check existence of Antenna a (Aa) in h5py file
#uAa_opt, uAa = self.get_idx('A_map',self.Aa._filename)
uAa_opt, uAa = self.get_idx('A_map',self.Aa.typ)
# check existence of Antenna b (Ab) in h5py file
uAb_opt, uAb = self.get_idx('A_map',self.Ab.typ)
grpname = str(ua) + '_' + str(ub) + '_' + str(uf) + \
'_' + str(uTa) + '_' + str(uTb) + \
'_' + str(uAa) + '_' + str(uAb)
self.dexist['H']['grpname'] = grpname
def check_grpname(self,key,grpname):
"""Check if the key's data with a given groupname
already exists in the h5py file
Parameters
----------
key: string
key of the h5py group
grpname : string
group name of the h5py file
Notes
-----
update the key grpname of self.dexist[key] dictionary
"""
try :
lfilename=pyu.getlong(self.filename,pstruc['DIRLNK'])
f = h5py.File(lfilename,'r')
if grpname.encode('utf8') in f[key].keys():
self.dexist[key]['exist'] = True
else :
self.dexist[key]['exist'] = False
f.close()
except:
f.close()
raise NameError('Link exist: issue during stacking')
def get_idx(self,key,array,tol=1e-3):
""" get the index of the requested array in the group key
of the hdf5 file.
If array doesn't exist, the hdf5file[key] array is stacked
Parameters
----------
key: string
key of the h5py group
array : np.ndarray
array to check existence
tol : np.float64
tolerance (in meters for key == 'p_map')
Returns
-------
(u_opt, u): tuple
u : np.ndarray
the index in the array of the file[key] group
u_opt : string ('r'|'s')
'r' if the array was already present and has been read from the h5py file
's' if the array has just been stacked into the dataset of group key
See Also:
--------
Links.array_exist
"""
umap = self.array_exist(key,array,tol=tol)
lu = len(umap)
# if exists take the existing one
# otherwise value is created
if lu != 0:
u = umap
u_opt='r'
else :
u = self.stack(key,array)
u_opt='s'
return u_opt,u[0]
def array_exist(self,key,array,tol=1e-3) :
""" check an array key has already been stored in h5py file
Parameters
----------
key: string
key of the h5py group
array : np.ndarray
array to check existence
tol : np.float64
tolerance (in meter for key == 'p_map')
Returns
-------
(ua)
ua : np.ndarray
the index in the array of the file[key] group
if the array is empty, the value doesn't exist
TODO
----
Add a tolerance on the rotation angle (T_map)
"""
lfilename = pyu.getlong(self.filename,pstruc['DIRLNK'])
# file should exist before calling (append mode)
f = h5py.File(lfilename,'a')
try :
fa = f[key][...]
f.close()
except:
f.close()
raise NameError('Link check_exist: issue during reading')
if key == 'c_map':
eq = array == fa
# sum eq = 3 means cy0,cy1 and cutoff are the same in fa and array
ua = np.where(np.sum(eq,axis=1)==3)[0]
elif key == 'p_map':
da = np.sqrt(np.sum((array-fa)**2,axis=1))
# indice points candidate in db for a
ua = np.where(da<tol)[0]
elif key == 'f_map':
# import ipdb
# ipdb.set_trace()
#### fmin_h5 < fmin_rqst
ufmi = fa[:,0]<=array[0]
# old version
# ufmi = np.where(fa[:,0]<=array[0])[0]
# lufmi = len(ufmi)
#### fmax_h5 > fmax_rqst
ufma = fa[:,1]>=array[1]
# old version
# ufma = np.where(fa[:,1]>=array[1])[0]
# lufma = len(ufma)
### fstep_h5 < fstep_rqst
ufst = fa[:,2]<=array[2]
# old version
# ufst = np.where(fa[:,2]<=array[2])[0]
# lufst = len(ufst)
# if fmin, fmax or fstep
#if (lufmi==0) and (lufma==0) and (lufst==0):
if (not ufmi.any()) and (not ufma.any()):
ua = np.array([])
else:
# find common lines of fmin and fmax and fstep
ua = np.where(ufmi & ufma & ufst)[0]
# ua = np.where(np.in1d(ufmi,ufma,ufst))[0]
# # find common lines of fmin and fmax
# ufmima = np.where(np.in1d(ufmi,ufma))[0]
# # find common lines of fmin, fmax and fstep
# ua = np.where(np.in1d(ufmima,ufst))[0]
elif key == 'A_map':
ua = np.where(fa == array.encode('utf-8'))[0]
elif key == 'T_map':
eq = array == fa
seq = np.sum(np.sum(eq,axis=1), axis=1)
ua = np.where(seq == 9)[0]
else :
raise NameError('Link.array_exist : invalid key')
return ua
def evalH(self,**kwargs):
""" evaluate channel transfer function
Notes
-----
This function modifies the orientation of the antenna at both sides
via Ta and Tb 3x3 matrices and recalculates the channel transfer function
for those new orientations.
The self.H variable is updated
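Minimal sketch (assuming the link has already been evaluated once, so that
the propagation channel self.C is available, as done in DLink.afp):
DL.Tb = np.eye(3)   # new orientation for antenna b
DL.evalH()          # H is recomputed without re-running the ray tracing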
"""
# Antenna Rotation
self.C.locbas(Ta=self.Ta, Tb=self.Tb)
# Transmission channel calculation
H = self.C.prop2tran(a=self.Aa, b=self.Ab, Friis=True, debug=True)
self.H = H
def eval(self,**kwargs):
""" link evaluation
Parameters
----------
applywav :boolean
Apply waveform to H
force : list
Force the computation (['sig','ray2','ray,'Ct','H']) AND save (replace previous computations)
alg : 1|'old'|'exp'|'exp2'
version of run for signature
si_progress: boolean (False)
display progression bar for signatures
diffraction : boolean (False)
takes into consideration diffraction points
ra_number_mirror_cf : int
rays.to3D number of ceil/floor reflections
ra_ceil_H: float, (default [])
ceil height .
If [] : Layout max ceil height
If 0 : only floor reflection (outdoor case)
If -1 : neither ceil nor floor reflection (2D case)
ra_vectorized: boolean (True)
if True use the (new, 2015) vectorized approach to determine 2D rays
progressbar: str
None: no progress bar
'python' : tqdm progress bar, 'notebook' : tqdm notebook progress bar
Notes
-----
update self.ak and self.tk
self.ak : ndarray
alpha_k
self.tk : ndarray
tau_k
Examples
--------
.. plot::
:include-source:
>>> from pylayers.simul.link import *
>>> DL=DLink(L="defstr.lay")
>>> DL.eval()
>>> DL.show()
>>> DL.plt_cir()
>>> plt.show()
See Also
--------
pylayers.antprop.signature
pylayers.antprop.rays
"""
defaults = {'applywav': False,
'si_progress': True,
'diffraction': True,
'ra_vectorized': True,
'ra_ceil_H': [],
'ra_number_mirror_cf': 1,
'force': True,
'bt': True,
'si_reverb': 4,
'nD': 2,
'nR': 10,
'nT': 10,
'debug': False,
'progressbar': None,
'rm_aw': True
}
# check antenna frequency range compatibility
if (self.Aa.fGHz!=self.Ab.fGHz).all():
raise AttributeError("Antenna frequency range are not compatible")
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if 'delay_excess_max_ns' not in kwargs:
kwargs['delay_excess_max_ns'] = self.delay_excess_max_ns
else:
self.delay_excess_max_ns = kwargs['delay_excess_max_ns']
if 'cutoff' not in kwargs:
kwargs['cutoff'] = self.cutoff
else:
self.cutoff=kwargs['cutoff']
if 'threshold' not in kwargs:
kwargs['threshold'] = self.threshold
else:
self.threshold=kwargs['threshold']
if 'force' in kwargs:
if not isinstance(kwargs['force'],list):
if kwargs['force'] == True :
kwargs['force'] = ['sig','ray2','ray','Ct','H']
else :
# Ct and H are not yet saved/loaded
# consistently with the given configuration
# they are disabled here
kwargs['force'] = ['Ct','H']
# self.checkh5()
if isinstance(kwargs['progressbar'],str):
if kwargs['progressbar'] =='notebook':
pbar = tqdm.tqdm_notebook(total=100)
elif kwargs['progressbar']=='python':
pbar = tqdm.tqdm(total=100)
elif isinstance(kwargs['progressbar'],tqdm.tqdm):
pbar = kwargs['progressbar']
############
# Signatures
############
tic = time.time()
Si = Signatures(self.L,
self.ca,
self.cb,
cutoff = kwargs['cutoff'],
threshold = kwargs['threshold'])
if (self.dexist['sig']['exist'] and not ('sig' in kwargs['force'])):
logger.info(" Load existing signatures from :%s",self.dexist['sig']['grpname'])
self.load(Si, self.dexist['sig']['grpname'], L=self.L)
else:
logger.info(" Run signatures")
self.nD = kwargs['nD']
self.nT = kwargs['nT']
self.nR = kwargs['nR']
self.bt = kwargs['bt']
Si.run(cutoff = self.cutoff,
diffraction = kwargs['diffraction'],
threshold = self.threshold,
delay_excess_max_ns = self.delay_excess_max_ns,
nD = self.nD,
nR = self.nR,
nT = self.nT,
progress = kwargs['si_progress'],
bt = self.bt)
logger.info(" Save signature in %s ",self.dexist['sig']['grpname'])
self.save(Si,'sig',self.dexist['sig']['grpname'],force = kwargs['force'])
self.Si = Si
toc = time.time()
logger.info(" End signature in %d sec",toc-tic)
try:
pbar.update(20)
except:
pass
############
# Rays
############
logger.info(" Start Rays determination")
tic = time.time()
r2d = Rays(self.a,self.b)
#############
# get 2D rays
#############
if self.dexist['ray2']['exist'] and not ('ray2' in kwargs['force']):
logger.info(" Load r2d from %s", self.dexist['ray2']['grpname'])
self.load(r2d,self.dexist['ray2']['grpname'], L=self.L)
else :
# perform computation ...
# ... with vectorized ray evaluation
logger.debug(" a : (%f,%f,%f)", self.a[0], self.a[1], self.a[2])
logger.debug(" b : (%f,%f,%f)", self.b[0], self.b[1], self.b[2])
if kwargs['ra_vectorized']:
logger.info(" Determine r2d vectorized version")
r2d = Si.raysv(self.a,self.b)
# ... or with original and slow approach ( to be removed in a near future)
else :
logger("Determine r2d non vectorized version")
r2d = Si.rays(self.a,self.b)
# save 2D rays
logger.info(" save r2d in %s ",self.dexist['ray2']['grpname'])
self.save(r2d,'ray2', self.dexist['ray2']['grpname'], force = kwargs['force'])
self.r2d = r2d
#############
# get 3D rays
#############
R = Rays(self.a,self.b)
R.is3D = True
if self.dexist['ray']['exist'] and not ('ray' in kwargs['force']):
logger.info(" Load r3d from %s", self.dexist['ray']['grpname'])
self.load(R,self.dexist['ray']['grpname'], L=self.L)
else :
if kwargs['ra_ceil_H'] == []:
if self.L.typ=='indoor':
ceilheight = self.L.maxheight
else:
ceilheight = 0
else:
ceilheight = kwargs['ra_ceil_H']
logger.info(" Run to3d H: %d, N: %d", ceilheight,kwargs['ra_number_mirror_cf'] )
R = self.r2d.to3D(self.L, H=ceilheight, N=kwargs['ra_number_mirror_cf'])
if kwargs['rm_aw']:
R = R.remove_aw(self.L)
logger.info(" Run R.locbas ")
R.locbas(self.L)
logger.info(" Run R.fillinter ")
R.fillinter(self.L)
# C = Ctilde()
# C = R.eval(self.fGHz)
# save 3D rays
logger.info(" Save 3D rays in %s",self.dexist['ray']['grpname'])
self.save(R, 'ray', self.dexist['ray']['grpname'], force = kwargs['force'])
self.R = R
toc = time.time()
logger.info(" Stop rays %d",toc-tic)
if self.R.nray == 0:
raise NameError('No rays have been found. Try to re-run the simulation with a higher S.cutoff ')
try:
pbar.update(20)
except:
pass
############
# Ctilde
############
if self.dexist['Ct']['exist'] and not ('Ct' in kwargs['force']):
C = Ctilde()
self.load(C,self.dexist['Ct']['grpname'])
else :
#if not hasattr(R,'I'):
# Ctilde...
# Find an other criteria in order to decide if the R has
# already been evaluated
C = R.eval(self.fGHz)
# ...save Ct
self.save(C,'Ct',self.dexist['Ct']['grpname'],force = kwargs['force'])
self.C = C
try:
pbar.update(20)
except:
pass
############
# H
############
H = Tchannel()
if self.dexist['H']['exist'] and not ('H' in kwargs['force']):
self.load(H,self.dexist['H']['grpname'])
else :
# Ctilde antenna
C.locbas(Ta=self.Ta, Tb=self.Tb)
#T channel
H = C.prop2tran(a=self.Aa,b=self.Ab,Friis=True,debug=kwargs['debug'])
self.save(H,'H',self.dexist['H']['grpname'],force = kwargs['force'])
self.H = H
try:
pbar.update(20)
except:
pass
if kwargs['applywav']:
if self.H.isFriis:
self.ir = self.H.get_cir(self.wav.sf)
else:
self.ir = self.H.get_cir(self.wav.sfg)
try:
pbar.update(20)
except:
pass
self.checkh5()
def adp(self,imax=1000):
""" construct the angular delay profile
Parameters
----------
imax : int
"""
self.adpmes = self.afpmes.toadp()
self.adpmes.cut(imax=imax)
self.adprt = self.afprt.toadp()
self.adprt.cut(imax=imax)
def afp(self,**kwargs):
""" Evaluate angular frequency profile
Parameters
----------
fGHz : np.array
frequency range
az : azimuth angle (radian)
tilt : tilt angle (-pi/2<tilt<pi/2)
polar : string
win : string 'rect' | 'hamming'
_filemeas : string
_filecal : string
ang_offset :
BW : float
bandwidth
ext : string
'txt' | 'mat'
dirmeas : string
directory of the data in the project path
Notes
-----
If a measurement file is given the angular range is obtained from the measurement
otherwise the variable az is used.
"""
defaults = {'fGHz':32.6,
'az': 0,
'tilt':0,
'polar':'V',
'win':'rect',
'_filemeas':'',
'_filecal':'',
'ang_offset' : 0.37,
'BW': 1.6,
'ext':'txt',
'dirmeas':'meas',
'refinement':False
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fGHz = kwargs.pop('fGHz')
az = kwargs.pop('az')
tilt = kwargs.pop('tilt')
polar = kwargs.pop('polar')
win = kwargs.pop('win') # argument for loadmes
_filemeas = kwargs.pop('_filemeas') # argument for loadmes
_filecal = kwargs.pop('_filecal') # argument for loadmes
dirmeas = kwargs.pop('dirmeas') # argument for loadmes
ang_offset = kwargs.pop('ang_offset') # argument for loadmes
BW = kwargs.pop('BW') # argument for loadmes
ext = kwargs.pop('ext') # argument for loadmes
refinement = kwargs.pop('refinement') # argument for loadmes
# read measurement if available
if _filemeas!='':
fcGHz = self.fGHz[0]
self.afpmes = AFPchannel(tx=self.a,rx=self.b)
self.afpmes.loadmes(_filemeas,
_filecal,
fcGHz=fcGHz,
BW=BW,
win=win,
ang_offset=ang_offset,
ext=ext,
dirmeas=dirmeas,
refinement=refinement)
az = self.afpmes.az
#
# afpmes.x
# afpmes.y
# afpmes.fcGHz
# afpmes.az measure angular range
# afpmes.azrt ray tracing angular range
# create an empty AFP
# tx = a
# rx = b
# angular range (a) : phi
#
self.afprt = AFPchannel(tx=self.a,rx=self.b,az=az)
for k,ph in enumerate(az.squeeze()):
self.Tb = geu.MATP(self.Ab.sl,self.Ab.el,ph,tilt,polar)
# self._update_show3(ant='b')
# pdb.set_trace()
self.evalH()
E = self.H.energy()
if k==0:
self.dpadp = E[None,...]
else:
self.dpadp = np.concatenate((self.dpadp,E[None,...]),axis=0)
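# Coherent reconstruction of the transfer function for the current pointing
# angle ph: ray contributions are summed over the delay axis (axis 0) as
# S(f) = sum_k y_k * exp(-j 2 pi f tau_k), using the frequency axis of H
# when several frequency points are available and the requested fGHz
# otherwise.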
if self.H.y.shape[3]!=1:
S = np.sum(self.H.y*np.exp(-2*1j*np.pi*self.H.x[None,None,None,:]*self.H.taud[:,None,None,None]),axis=0)
else:
S = np.sum(self.H.y*np.exp(-2*1j*np.pi*fGHz*self.H.taud[:,None,None,None]),axis=0)
try:
self.afprt.y = np.vstack((self.afprt.y,np.squeeze(S)))
except:
self.afprt.y = np.squeeze(S)
if self.H.y.shape[3]!=1:
self.afprt.x = self.H.x
self.afprt.fcGHz = self.afprt.x[len(self.afprt.x)//2]
else:
self.afprt.x = fGHz
self.afprt.fcGHz = fGHz[len(fGHz)//2]
self.dpdp = np.sum(self.dpadp,axis=0)
self.dpap = np.sum(self.dpadp,axis=1)
#return(afp)
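# Illustrative angular scan with the ray tracing only (sketch, not executed;
# it assumes the link has already been defined and evaluated once):
#
#     DL.afp(fGHz=np.array([32.6]), az=np.linspace(-np.pi, np.pi, 181))
#     # DL.dpadp : power per (pointing angle, ray)
#     # DL.dpdp  : delay-domain profile (dpadp summed over the angle axis)
#     # DL.dpap  : angular profile (dpadp summed over the ray/delay axis)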
def select(self):
fig,ax = self.show()
self.cid = fig.canvas.mpl_connect('button_press_event',self.OnClick)
return(fig,ax)
def OnClick(self,event):
x = event.xdata
y = event.ydata
if event.button==1:
self.a=np.array([x,y,1.2])
self.caf.set_offsets(np.array([[x,y]]))
plt.draw()
if event.button==3:
self.b=np.array([x,y,1.2])
self.cbf.set_offsets(np.array([[x,y]]))
plt.draw()
if event.button==2:
self.eval()
self._show3()
print (x,y)
def show(self,**kwargs):
""" show the link
Parameters
----------
s : int
size of Tx/Rx marker in points
ca : string
color of termination a (A)
cb : string
color of termination b (B)
markera : string
"^"
markerb : string
"o"
alpha : float
marker transparency (0 < alpha <1)
axis : boolean
display axis boolean (default True)
figsize : tuple
figure size if fig not specified default (20,10)
fontsize : int
default 20
rays : boolean
activation of rays visualization (True)
bsig : boolean
activation of signature visualization (False)
bsave : boolean
save in a file indexed by ix
laddr : list
list of signature addresses
cmap : colormap
pol : string
'tt','pp','tp','pt','co','cross',tot'
labels : boolean
enabling edge label (useful for signature identification)
color : string
'cmap'
linewidth : float
alpha : float
radius : float
radius in meters for layout visualization
dB : boolean
default True
dyn : float
dynamic in dB (def 70dB)
ix
vmin
vmax
bcolorbar : boolean
Returns
-------
fig,ax
Examples
--------
>>> from pylayers.simul.link import *
>>> DL=DLink()
>>> DL.show(lr=-1,rays=True,dB=True,color='cmap',cmap = plt.cm.jet)
>>> DL.show(laddr=[(6,2)],bsig=True)
"""
defaults ={'s': 300, # size points
'ca': '#6666ff', # color a
'cb': '#ff0000', # color b
'markera': "^", # tri_up
'markerb': "o", # circle
'alpha': 1,
'axis': True,
'lr': -1,
'ls': -1,
'fig': [],
'ax': [],
'figsize': (20,10),
'fontsize': 20,
'rays': True,
'bsig': False,
'bsave': False,
'laddr': [(1,0)],
'cmap': plt.cm.hot_r,
'pol': 'tot',
'color': 'k',
'linewidth': 1,
'radius': -1,
'dB': True,
'labels': False,
'aw': False,
'dyn': 70,
'ix' : 0,
'vmin': -120,
'vmax': -40,
'bcolorbar': True}
for key in defaults:
if key not in kwargs:
kwargs[key]=defaults[key]
if kwargs['fig']==[]:
fig = plt.figure(figsize=kwargs['figsize'])
else:
fig = kwargs['fig']
if kwargs['ax']==[]:
ax = fig.add_subplot(111)
else:
ax=kwargs['ax']
#
# Layout
#
fig, ax = self.L.showG('s',
nodes = False,
fig = fig,
ax = ax,
labels = kwargs['labels'],
aw = kwargs['aw'],
axis = kwargs['axis'])
if kwargs['radius'] == -1:
kwargs['radius'] = self.L.radius
# background color
#ax.set_facecolor('#cccccc')
#
# Plot Rays
#
if kwargs['rays'] and self.R.nray > 0:
#ECtt,ECpp,ECtp,ECpt = self.C.energy()
#if kwargs['pol']=='tt':
# val = ECtt
#if kwargs['pol']=='pp':
# val = ECpp
#if kwargs['pol']=='tp':
# val = ECtp
#if kwargs['pol']=='pt':
# val = ECpt
#if kwargs['pol']=='tot':
# val = ECtt+ECpp+ECpt+ECtp
#if kwargs['pol']=='co':
# val = ECtt+ECpp
#if kwargs['pol']=='cross':
#" val = ECtp+ECpt
val = self.H.energy()[:,0,0]
clm = kwargs['cmap']
#
# Select group of interactions
#
if ((type(kwargs['lr']) is list) or
(type(kwargs['lr']) is np.ndarray)):
lr = kwargs['lr']
else:
if kwargs['lr']==-1:
lr = np.arange(self.R.nray)
else:
lr = [ int(kwargs['lr']) ]
#
# Set the min and max of ray level
#
if kwargs['vmin'] == []:
vmin = val.min()
vmax = val.max()
if kwargs['dB']:
vmin = 10*np.log10(vmin)
vmax = 10*np.log10(vmax)
else:
vmin = kwargs['vmin']
vmax = kwargs['vmax']
#
# limitation of the visualization zone around the center of the link
#
pm = (self.a + self.b)/2.
R = np.minimum(kwargs['radius'],1.5*self.L.radius)
#ax.set_xlim(pm[0]-R,pm[0]+R)
#ax.set_ylim(pm[1]-R,pm[1]+R)
#
# each ray ir from list lr has its own color
#
for ir in lr:
if kwargs['dB']:
valdB = np.array(10*np.log10(val[ir]))
valdB = np.maximum(vmin,valdB)
valdB = np.minimum(vmax,valdB)
#RayEnergy = max((10*np.log10(val[ir]/val.max())+kwargs['dyn']),0)/kwargs['dyn']
RayEnergy = (valdB-vmin)/(vmax-vmin)
else:
valLin = val[ir]
valLin = np.maximum(vmin,valLin)
valLin = np.minimum(vmax,valLin)
RayEnergy = (valLin-vmin)/(vmax - vmin)
if kwargs['color'] == 'cmap':
color = clm(RayEnergy)
#width = 10*RayEnergy
linewidth = kwargs['linewidth']
alpha = 1
else:
color = kwargs['color']
linewidth = kwargs['linewidth']
alpha = kwargs['alpha']
# plot ray (i,r)
fig,ax = self.R.show(rlist = [ir],
color = color,
linewidth = 10*RayEnergy,
alpha = alpha,
fig = fig, ax = ax,
layout = False,
points = False,
bcolorbar = kwargs['bcolorbar'],
cmap = kwargs['cmap'],
vmin = vmin,
vmax = vmax )
#if kwargs['color']=='cmap':
# sm = plt.cm.ScalarMappable(cmap=kwargs['cmap'], norm=plt.Normalize(vmin=kwargs['vmin'],
# vmax=kwargs['vmax']))
# sm._A = []
# cb = plt.colorbar(sm)
# cb.ax.tick_params(labelsize=24)
# cb.set_label('Level (dB)', fontsize=24)
#
# Plot signature
#
if kwargs['bsig']:
for addr in kwargs['laddr']:
seq = self.Si[addr[0]][2*addr[1]:2*addr[1]+2,:]
Si = Signature(seq)
isvalid,r,u = Si.sig2ray(self.L,self.a[0:2],self.b[0:2])
fig,ax = Si.show(self.L,self.a[0:2],self.b[0:2],fig=fig,ax=ax)
#
# Point A
#
self.caf = ax.scatter(self.a[0], self.a[1],
c = kwargs['ca'],
s = kwargs['s'],
marker = kwargs['markera'],
edgecolor='black',
facecolor=kwargs['ca'],
linewidth=2,
alpha =kwargs['alpha'],
zorder = 1000)
#ax.text(self.a[0]+0.3,self.a[1]+0.3,'a',
# fontsize = kwargs['fontsize'], bbox=dict(facecolor='white',alpha=0.5))
#
# Point B
#
self.cbf = ax.scatter(self.b[0], self.b[1],
c = kwargs['cb'],
s = kwargs['s'],
marker = kwargs['markerb'],
edgecolor='black',
facecolor=kwargs['cb'],
linewidth=2,
alpha = kwargs['alpha'],
zorder = 1000)
#ax.text(self.b[0]+0.3, self.b[1]+0.3, 'b',
# fontsize=kwargs['fontsize'],bbox=dict(facecolor='white',alpha=0.5))
#
# 1 meter scale bar
#
xe = 1
ye = 3
le = 1
ax.plot(np.array([xe,xe+le]),np.array([ye,ye]),linewidth=4,color='k')
ax.plot(np.array([xe,xe]),np.array([ye,ye+0.2]),linewidth=4,color='k')
ax.plot(np.array([xe+le,xe+le]),np.array([ye,ye+0.2]),linewidth=4,color='k')
ax.text(xe-0.1,ye-0.5,'1 meter',fontsize=18)
#plt.axis('on')
ax.tick_params(labelsize = 24)
ax.set_xlabel('x meters',fontsize = 24)
ax.set_ylabel('y meters',fontsize = 24)
#plt.savefig('Link.eps')
if kwargs['bsave']:
plt.savefig('Link'+str(kwargs['ix'])+'.png')
plt.close()
plt.axis('auto')
return fig,ax
def _show3(self,rays=True, lay= True, ant= True, newfig= False, **kwargs):
""" display the simulation scene using Mayavi
Parameters
----------
rays: boolean
lay : boolean
ant : boolean
newfig : boolean (default : False)
kwargs of Rays.show3()
see also
--------
pylayers.gis.layout
pylayers.antprop.antenna
pylayers.antprop.rays
Examples
--------
>>> from pylayers.simul.link import *
>>> L=DLink()
>>> L.eval()
"""
if not newfig:
self._maya_fig=mlab.gcf()
else:
self._maya_fig=mlab.figure(bgcolor=(1,1,1),fgcolor=(0,0,0))
if 'scale' in kwargs:
scale = kwargs.pop('scale')
else:
scale = 0.5
if 'centered' in kwargs:
centered = kwargs['centered']
else :
centered = False
if centered:
pg = np.zeros((3))
pg[:2] = self.L.pg
if centered:
ptx = self.a-pg
prx = self.b-pg
else :
ptx = self.a
prx = self.b
self._maya_fig.scene.disable_render = True
if ant :
Atx = self.Aa
Arx = self.Ab
Ttx = self.Ta
Trx = self.Tb
# evaluate antenna if required
if not Atx.evaluated:
Atx.eval()
Atx._show3(T=Ttx.reshape(3,3),
po=ptx,
title=False,
bcolorbar=False,
bnewfig=False,
bcircle = False,
name = Atx._filename,
scale= scale,
binteract=False)
if not Arx.evaluated:
Arx.eval()
Arx._show3(T=Trx.reshape(3,3),
po=prx,
title=False,
bcolorbar=False,
bnewfig=False,
bcircle = False,
name = Arx._filename,
scale= scale,
binteract=False)
if lay:
if self.L.typ == 'outdoor':
show_ceil = False
opacity = 1.
ceil_opacity = 1.
elif self.L.typ == 'indoor':
show_ceil = True
opacity = 0.7
ceil_opacity = 0.
self._maya_fig = self.L._show3(newfig=False,
opacity=opacity,
ceil_opacity=ceil_opacity,
show_ceil=show_ceil,
centered=centered,**kwargs)
# mlab.text3d(self.a[0],self.a[1],self.a[2],'a',
# scale=1,
# color=(1,0,0))
# mlab.text3d(self.b[0],self.b[1],self.b[2],'b',
# scale=1,
# color=(1,0,0))
if rays :
# check rays with energy
# if hasattr(self,'H') and not kwargs.has_key('rlist'):
# urays = np.where(self.H.y!=0)[0]
# kwargs['rlist']=urays
# import ipdb
# ipdb.set_trace()
if hasattr(self,'R'):
if self.H.y.ndim>2:
ER = np.squeeze(self.H.energy())
kwargs['ER']=ER
self.R._show3(L=[],**kwargs)
fp = (self.a+self.b)/2.
dab = np.sqrt(np.sum((self.a-self.b)**2))
mlab.view(focalpoint=fp)#,distance=15*dab-55)
self._maya_fig.scene.disable_render = False
mlab.orientation_axes()
mlab.show()
return self._maya_fig
#return(self._maya_fig)
def _update_show3(self,ant='a',delrays=False):
"""
"""
view=mlab.view()
antenna = eval('self.A'+ant)
rot = eval('self.T'+ant).reshape(3,3)
pos = eval('self.'+ant)
#if not antenna.full_evaluated:
# if not antenna.full_evaluated:
if not antenna.evaluated:
antenna.eval()
if hasattr(antenna,'_mayamesh'):
# antenna.eval()
x, y, z, k, scalar = antenna._computemesh(T=rot,po=pos,scale= 0.5)
antenna._mayamesh.mlab_source.set(x=x,y=y,z=z,scalars=scalar)
else:
antenna._show3(T=rot,po=pos,
title=False,
bcolorbar=False,
bcircle = False,
bnewfig=False,
scale= 0.5,
name = antenna._filename,
binteract=False)
if delrays:
import time
for x in self._maya_fig.children[::-1]:
if 'Rays' in x.name:
x.remove()
mlab.view(view[0],view[1],view[2],view[3])
# [x.remove() for x in self._maya_fig.children ]
# # update wall opaccity
# ds =[i for i in self._maya_fig.children if self.L._filename in i.name][0]
# a_in = self.L.Gt.node[self.ca]['indoor']
# b_in = self.L.Gt.node[self.cb]['indoor']
# if
# if a_in or b_in:
# # indoor situation
# ds.children[0].children[0].actor.property.opacity=0.5
# else:
# ds.children[0].children[0].actor.property.opacity=1.
def plt_cir(self,**kwargs):
""" plot link channel impulse response
Parameters
----------
BWGHz : Bandwidth
Nf : Number of frequency points
fftshift : boolean
rays : boolean
display ray contributors
fspl : boolean
display free space path loss
Returns
-------
fig,ax
See Also
--------
pylayers.antprop.channel.Tchannel.getcir
"""
defaults = {'fig' : [],
'ax' : [],
'BWGHz' :5,
'Nf' :1000,
'rays' :True,
'fspl' :True,
'vmin' :-120,
'vmax' : -40,
'taumin': 0,
'taumax': 160,
'bgrid':True,
'cmap':'jet',
'ix' : 0,
'bsave' : False,
'fontsize':18
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if kwargs['fig'] == []:
fig = plt.gcf()
else:
fig = kwargs['fig']
if kwargs['ax'] == []:
ax = plt.gca()
else:
ax = kwargs['ax']
fontsize = kwargs.pop('fontsize')
vmin = kwargs.pop('vmin')
vmax = kwargs.pop('vmax')
taumin = kwargs.pop('taumin')
taumax = kwargs.pop('taumax')
#taumax = self.H.taud.max()
BWGHz = kwargs['BWGHz']
Nf = np.maximum(kwargs['Nf'],taumax*BWGHz).astype(int)
# getcir is a Tchannel method
self.ir = self.H.getcir(BWGHz=BWGHz, Nf=Nf)
self.ir.plot(fig=fig, ax=ax, fontsize=fontsize)
ax.set_ylim(vmin,vmax)
delay = self.ir.x
delay = delay[delay>0]
dist = delay*0.3
FSPL0 = -32.4- 20*np.log10(self.fGHz[0])-20*np.log10(dist)
FSPLG = FSPL0 + self.Aa.GdBmax[0] + self.Ab.GdBmax[0]
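# The two reference curves above assume delays expressed in ns and
# frequencies in GHz: dist = delay * 0.3 corresponds to c ~ 3e8 m/s, and
# FSPL0 = -32.4 - 20 log10(f_GHz) - 20 log10(d_m) is the free space received
# power in dB relative to the transmitted power, since
# 20*log10(4*pi*1e9/3e8) ~ 32.45 dB. FSPLG adds the maximum gain of both
# antennas.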
if kwargs['fspl']:
# Free space path loss
ax.plot(delay,FSPL0,linewidth=2,color='b',label='FSPL')
# Free space path loss + gain
ax.plot(delay,FSPLG,linewidth=3,color='k',label='FSPL+Gtmax+Grmax')
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
if kwargs['bgrid']:
ax.grid()
if kwargs['rays']:
# energy of each ray normalized between vmin(0) and vmax(1)
ER = np.squeeze(self.H.energy())
uER = ER.argsort()[::-1]
ER = ER[uER]
ERdB = 10*np.log10(ER)
ERdB = np.minimum(ERdB,vmax)
ERdB = np.maximum(ERdB,vmin)
colors = (ERdB-vmin)/(vmax-vmin)
#color_range = np.linspace( 0, 1., len(ER))#np.linspace( 0, np.pi, len(ER))
# sort rays by increasing energy
#colors = color_range[uER]
# most important rays , it=0 ir=0 , if =0
ax.scatter(self.H.taud[uER],ERdB,c=colors,s=200*colors,cmap=kwargs['cmap'],vmin=0,vmax=1)
#ax.set_xlim([min(self.H.taud)-10,max(self.H.taud)+10])
ax.set_xlim(taumin,taumax)
ax.legend(fontsize=fontsize)
if kwargs['bsave']:
plt.savefig('cir'+str(kwargs['ix'])+'.png')
plt.close()
return fig,ax
def plt_doa(self,**kwargs):
"""plot direction of arrival and departure
Parameters
----------
fig : plt.figure
ax : plt.axis
phi: tuple (-180, 180)
phi angle
normalize: bool
energy normalized
reverse : bool
inverse theta and phi representation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar: bool
title : bool
See Also
--------
pylayers.antprop.channel.Tchannel.plotd
"""
kwargs['d']='doa'
return self.H.plotd(**kwargs)
def plt_dod(self,**kwargs):
"""plot direction of arrival and departure
Parameters
----------
fig : plt.figure
ax : plt.axis
phi: tuple (-180, 180)
phi angle
normalize: bool
energy normalized
reverse : bool
inverse theta and phi representation
polar : bool
polar representation
cmap: matplotlib.cmap
mode: 'center' | 'mean' | 'in'
see bsignal.energy
s : float
scatter dot size
fontsize: float
edgecolors: bool
colorbar: bool
title : bool
See Also
--------
pylayers.antprop.channel.Tchannel.plotd
"""
kwargs['d']='dod'
return self.H.plotd(**kwargs)
def plt_dspread(self,**kwargs):
""" plot delay spread
"""
defaults = { 'fig':[],
'ax':[]
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig = plt.gcf()
else:
fig = kwargs['fig']
if kwargs['ax'] == []:
ax = plt.gca()
else:
ax = kwargs['ax']
ax.hist(self.H.taud,bins=len(self.H.taud)//2)
ax.set_xlim([0,max(self.H.taud)])
return fig,ax
def plt_aspread(self,**kwargs):
""" plot angular spread
"""
defaults = { 'fig':[],
'ax':[]
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if kwargs['fig'] == []:
fig = plt.gcf()
else:
fig = kwargs['fig']
if kwargs['ax'] == []:
ax = plt.gca()
else:
ax = kwargs['ax']
ax.hist(self.H.doa[:,0],bins=len(self.H.doa[:,0])//2)
ax.set_xlim([-np.pi,np.pi])
return fig,ax
def _autocufoff(self):
""" automatically determine minimum cutoff
See Also
--------
pylayers.antprop.loss.losst
pylayers.gis.layout.angleonlink3
"""
v = np.vectorize( lambda t:self.L.Gs.node[t]['name'])
# determine incidence angles on segment crossing p1-p2 segment
#data = L.angleonlink(p1,p2)
if np.allclose(self.a,self.b):
self.cutoff = 2
else:
data = self.L.angleonlink3(self.a,self.b)
# as many slabs as segments and subsegments
us = data['s']
if len(us) >0:
sl = v(us)
uus = np.where((sl != 'AIR') & (sl != '_AIR'))[0]
self.cutoff = len(uus)
else:
self.cutoff = 2
return self.cutoff
if (__name__ == "__main__"):
#plt.ion()
doctest.testmod()
| mit |
pkruskal/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
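# Note on the two inner-loop strategies: the NIPALS loop above is documented
# to return the first left/right singular vectors of X'Y, so its unit-norm
# x_weights should match the first column of U from _svd_cross_product up to
# sign. An illustrative (not executed) check, assuming numpy as imported
# above:
#
#     rng = np.random.RandomState(0)
#     X, Y = rng.randn(20, 5), rng.randn(20, 3)
#     u_nip, v_nip, _ = _nipals_twoblocks_inner_loop(X, Y.copy())
#     u_svd, v_svd = _svd_cross_product(X, Y)
#     np.allclose(np.abs(u_nip.ravel()), np.abs(u_svd.ravel()), atol=1e-4)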
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
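# Minimal usage sketch for the helper above (illustrative only):
#
#     X = np.array([[1., 2.], [3., 4.], [5., 9.]])
#     Y = np.array([[0.], [1.], [2.]])
#     Xs, Ys, x_mean, y_mean, x_std, y_std = _center_scale_xy(X.copy(), Y.copy())
#     # Xs and Ys now have zero column means and, with scale=True (the
#     # default), unit column standard deviation computed with ddof=1.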
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
allow to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
(i) The outer loop iterate over components.
(ii) The inner loop estimates the weights vectors. This can be done
with two algorithms: (a) the inner loop of the original NIPALS algorithm, or
(b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
# Possible memory footprint reduction may be done here: in order to
# avoid the allocation of a data chunk for the rank-one
# approximations matrix which is then subtracted to Xk, we suggest
# to perform a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
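# The rescaling above folds the column standard deviations of X and Y back
# into coef_, i.e. coef_ = diag(1/x_std_) W* Q' diag(y_std_), so that the
# docstring relation ``Y = X coef_ + Err`` refers to data expressed in the
# original (unscaled) units.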
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find weights u, v that optimizes:
``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Leave the default
value of True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
max corr(Xk u, Yk v) * var(Xk u) var(Yk u), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression, but slightly different from CCA. This is mostly used
for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
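# At this point x_weights_ / y_weights_ hold the left / right singular
# vectors of the cross-covariance matrix X'Y (computed on the centered and
# optionally scaled data), and x_scores_ / y_scores_ are the projections of
# X and Y onto those directions.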
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
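# Illustrative PLSSVD usage (sketch only, mirroring the Examples sections of
# the classes above; the shapes are those produced by fit/transform for this
# input):
#
#     X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
#     Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
#     plssvd = PLSSVD(n_components=2).fit(X, Y)
#     X_c, Y_c = plssvd.transform(X, Y)
#     # X_c.shape == (4, 2) and Y_c.shape == (4, 2)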
| bsd-3-clause |
ContinuumIO/blaze | blaze/compute/sql.py | 3 | 49986 | """
>>> from blaze import *
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from sqlalchemy import Table, Column, MetaData, Integer, String
>>> t = Table('accounts', MetaData(),
... Column('name', String, primary_key = True),
... Column('amount', Integer))
>>> print(compute(deadbeats, t)) # doctest: +SKIP
SELECT accounts.name
FROM accounts
WHERE accounts.amount < :amount_1
"""
from __future__ import absolute_import, division, print_function
from collections import Iterable
from copy import copy
import datetime
import itertools
from itertools import chain
from operator import and_, eq, attrgetter
import warnings
from datashape import TimeDelta, Option, int32
from datashape.predicates import iscollection, isscalar, isrecord
import numpy as np
import numbers
from odo.backends.sql import metadata_of_engine, dshape_to_alchemy
from multipledispatch import MDNotImplementedError
import sqlalchemy as sa
from sqlalchemy import sql, Table, MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.sql import Selectable, Select, functions as safuncs
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.elements import ClauseElement, ColumnElement, ColumnClause
from sqlalchemy.sql.selectable import FromClause, ScalarSelect
import toolz
from toolz import unique, concat, pipe, first
from toolz.compatibility import zip
from toolz.curried import map
from .core import compute_up, compute, base
from .varargs import VarArgs
from ..compatibility import reduce, basestring, _inttypes
from ..dispatch import dispatch
from ..expr import (
BinOp,
BinaryMath,
Broadcast,
By,
Coalesce,
Coerce,
Concat,
DateTime,
DateTimeTruncate,
Distinct,
Expr,
Field,
FloorDiv,
Head,
IsIn,
Join,
Label,
Like,
Merge,
Pow,
Projection,
ReLabel,
Reduction,
Sample,
Selection,
Shift,
Slice,
Sort,
Sub,
Summary,
Tail,
UnaryOp,
UnaryStringFunction,
common_subexpression,
count,
greatest,
least,
mean,
nelements,
notnull,
nunique,
reductions,
std,
StrCat,
StrFind,
StrSlice,
var,
)
from ..expr.datetime import dayofweek
from ..expr.strings import len as str_len
from ..expr.broadcast import broadcast_collect
from ..expr.math import isnan
from ..utils import listpack
__all__ = ['sa', 'select']
def unsafe_inner_columns(s):
"""Return the inner columns of a selectable if present, otherwise return
the columns.
Parameters
----------
s : Selectable
The selectable to extract the columns from.
Returns
-------
inner_columns : iterable[ColumnClause]
The inner columns if present, otherwise the columns.
Notes
-----
This function is unsafe because it can drop important clauses from a column
expression. The use of this function has caused countless hours of
debugging and sadness. Please think very carefully about how this will
interact with selects with where or order by clauses. If you are unsure,
generate worse sql that works in all cases.
See Also
--------
inner_columns
"""
try:
return s.inner_columns
except AttributeError:
return s.columns
_preserved_attributes = frozenset({
'_from_obj',
'_whereclause',
'_distinct',
'_group_by_clause',
'_having',
'_limit',
'_offset',
'_order_by_clause',
})
def inner_columns(s):
"""Return the inner columns if it is safe to do so, otherwise return the
columns.
Parameters
----------
s : Selectable
The selectable to extract the columns from.
Returns
-------
inner_columns : iterable[ColumnClause]
The inner columns if it was safe to use them, otherwise the columns.
Notes
-----
This function can lead to unneeded extra joins when merging columns
together. You may want to look into
:func:`~blaze.compute.sql.reconstruct_select` or
:func:`~blaze.compute.sql.unify_wheres` to make sure you are merging
columns correctly.
See Also
--------
unsafe_inner_columns
"""
for attr in _preserved_attributes:
if hasattr(s, attr):
return s.columns
return unsafe_inner_columns(s)
@dispatch(Projection, Select)
def compute_up(expr, data, **kwargs):
d = dict((c.name, c) for c in getattr(data, 'inner_columns', data.c))
return data.with_only_columns([d[field] for field in expr.fields])
@dispatch(Projection, Selectable)
def compute_up(expr, data, scope=None, **kwargs):
return compute(
expr,
toolz.merge(scope, {expr._child: sa.select([data])}),
post_compute=False,
return_type='native',
)
@dispatch(Projection, sa.Column)
def compute_up(expr, data, scope=None, **kwargs):
selectables = [
compute(
expr._child[field],
scope,
post_compute=False,
return_type='native',
)
for field in expr.fields
]
froms = set(concat(s.froms for s in selectables))
assert 1 <= len(froms) <= 2, 'len(froms) = %s' % len(froms)
result = unify_froms(sa.select(first(sel.inner_columns)
for sel in selectables),
froms)
return result.where(unify_wheres(selectables))
@dispatch(Projection, Select)
def compute_up(expr, data, scope=None, **kwargs):
return data.with_only_columns(
first(compute(
expr._child[field],
scope,
post_compute=False,
return_type='native',
).inner_columns)
for field in expr.fields
)
@dispatch(Field, FromClause)
def compute_up(t, s, **kwargs):
return s.c[t._name]
def unify_froms(select, selectables):
"""Add every selectable in ``selectables`` to the FROM list of ``select``."""
return reduce(lambda x, y: x.select_from(y), selectables, select)
def unify_wheres(selectables):
"""AND together the distinct where clauses of ``selectables``.
Clauses are deduplicated by their string representation; ``None`` is
returned when no selectable carries a ``_whereclause``.
"""
clauses = list(unique((s._whereclause for s in selectables
if hasattr(s, '_whereclause')), key=str))
return reduce(and_, clauses) if clauses else None
@dispatch(Field, Select)
def compute_up(expr, data, scope=None, **kwargs):
name = expr._name
try:
inner_columns = list(data.inner_columns)
names = list(c.name for c in data.inner_columns)
column = inner_columns[names.index(name)]
except (KeyError, ValueError):
single_column_select = compute(
expr,
toolz.merge(scope, {expr._child: first(data.inner_columns)}),
post_compute=False,
return_type='native',
)
column = first(single_column_select.inner_columns)
result = unify_froms(sa.select([column]),
data.froms + single_column_select.froms)
return result.where(unify_wheres([data, single_column_select]))
else:
return data.with_only_columns([column])
@dispatch(Field, sa.Column)
def compute_up(t, s, **kwargs):
assert len(s.foreign_keys) == 1, 'exactly one foreign key allowed'
key_col = first(s.foreign_keys).column
return sa.select([key_col.table.c[t._name]]).where(s == key_col)
@dispatch(Broadcast, Select)
def compute_up(t, s, **kwargs):
cols = list(unsafe_inner_columns(s))
d = dict((t._scalars[0][c], cols[i])
for i, c in enumerate(t._scalars[0].fields))
result = compute(
t._scalar_expr,
d,
post_compute=False,
return_type='native',
).label(t._name)
s = copy(s)
s.append_column(result)
return s.with_only_columns([result])
@dispatch(Broadcast, Selectable)
def compute_up(t, s, **kwargs):
cols = list(unsafe_inner_columns(s))
d = dict((t._scalars[0][c], cols[i])
for i, c in enumerate(t._scalars[0].fields))
return compute(
t._scalar_expr,
d,
post_compute=False,
return_type='native',
).label(t._name)
@dispatch(Concat, (Select, Selectable), (Select, Selectable))
def compute_up(t, lhs, rhs, **kwargs):
if t.axis != 0:
raise ValueError(
'Cannot concat along a non-zero axis in sql; perhaps you want'
" 'merge'?",
)
return select(lhs).union_all(select(rhs)).alias()
@dispatch(Broadcast, ColumnElement)
def compute_up(t, s, **kwargs):
expr = t._scalar_expr
return compute(
expr,
s,
post_compute=False,
return_type='native',
).label(expr._name)
def _binop(type_, f):
@dispatch(type_, ColumnElement)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return f(t, t.lhs, data)
@dispatch(type_, Select)
def compute_up(t, data, **kwargs):
assert len(data.c) == 1, (
'Select cannot have more than a single column when doing'
' arithmetic'
)
column = first(unsafe_inner_columns(data))
if isinstance(t.lhs, Expr):
return f(t, column, t.rhs)
else:
return f(t, t.lhs, column)
@compute_up.register(type_,
(Select, ColumnElement, base),
(Select, ColumnElement))
@compute_up.register(type_,
(Select, ColumnElement),
base)
def binop_sql(t, lhs, rhs, **kwargs):
if isinstance(lhs, Select):
assert len(lhs.c) == 1, (
'Select cannot have more than a single column when doing'
' arithmetic, got %r' % lhs
)
lhs = first(unsafe_inner_columns(lhs))
if isinstance(rhs, Select):
assert len(rhs.c) == 1, (
'Select cannot have more than a single column when doing'
' arithmetic, got %r' % rhs
)
rhs = first(unsafe_inner_columns(rhs))
return f(t, lhs, rhs)
_binop(BinOp, lambda expr, lhs, rhs: expr.op(lhs, rhs))
_binop(
(greatest, least),
lambda expr, lhs, rhs: getattr(sa.func, type(expr).__name__)(lhs, rhs),
)
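# The two calls above register the generic binary-operator handlers with the
# factory: for plain ``BinOp`` expressions the SQL translation applies the
# expression's own Python operator (``expr.op``) to the operands, while for
# ``greatest``/``least`` it calls the SQL functions of the same name through
# ``sa.func``. For example, ``accounts.amount + 1`` against a sqlalchemy
# column compiles to an ``amount + :amount_1`` clause.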
@dispatch(Pow, ColumnElement)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return sa.func.pow(data, t.rhs)
else:
return sa.func.pow(t.lhs, data)
@dispatch(Pow, Select)
def compute_up(t, data, **kwargs):
assert len(data.c) == 1, (
'Select cannot have more than a single column when doing'
' arithmetic, got %r' % data
)
column = first(data.inner_columns)
if isinstance(t.lhs, Expr):
return sa.func.pow(column, t.rhs)
else:
return sa.func.pow(t.lhs, column)
@compute_up.register(Pow, (ColumnElement, base), ColumnElement)
@compute_up.register(Pow, ColumnElement, base)
def binop_sql_pow(t, lhs, rhs, **kwargs):
return sa.func.pow(lhs, rhs)
@dispatch(BinaryMath, ColumnElement)
def compute_up(t, data, **kwargs):
op = getattr(sa.func, type(t).__name__)
if isinstance(t.lhs, Expr):
return op(data, t.rhs)
else:
return op(t.lhs, data)
@dispatch(BinaryMath, Select)
def compute_up(t, data, **kwargs):
assert len(data.c) == 1, (
'Select cannot have more than a single column when doing'
' arithmetic, got %r' % data
)
column = first(data.inner_columns)
op = getattr(sa.func, type(t).__name__)
if isinstance(t.lhs, Expr):
return op(column, t.rhs)
else:
return op(t.lhs, column)
@compute_up.register(BinaryMath, (ColumnElement, base), ColumnElement)
@compute_up.register(BinaryMath, ColumnElement, base)
def binary_math_sql(t, lhs, rhs, **kwargs):
return getattr(sa.func, type(t).__name__)(lhs, rhs)
@compute_up.register(BinaryMath, Select, base)
def binary_math_sql_select(t, lhs, rhs, **kwargs):
left, right = first(lhs.inner_columns), rhs
result = getattr(sa.func, type(t).__name__)(left, right)
return reconstruct_select([result], lhs)
@compute_up.register(BinaryMath, base, Select)
def binary_math_sql_select(t, lhs, rhs, **kwargs):
left, right = lhs, first(rhs.inner_columns)
result = getattr(sa.func, type(t).__name__)(left, right)
return reconstruct_select([result], rhs)
@compute_up.register(BinaryMath, Select, Select)
def binary_math_sql_select(t, lhs, rhs, **kwargs):
left, right = first(lhs.inner_columns), first(rhs.inner_columns)
result = getattr(sa.func, type(t).__name__)(left, right)
assert lhs.table == rhs.table, '%s != %s' % (lhs.table, rhs.table)
return reconstruct_select([result], lhs.table)
@dispatch(FloorDiv, ColumnElement)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return sa.func.floor(data / t.rhs)
else:
return sa.func.floor(t.rhs / data)
@compute_up.register(FloorDiv, (ColumnElement, base), ColumnElement)
@compute_up.register(FloorDiv, ColumnElement, base)
def binop_sql(t, lhs, rhs, **kwargs):
return sa.func.floor(lhs / rhs)
@dispatch(isnan, ColumnElement)
def compute_up(t, s, **kwargs):
return s == float('nan')
@dispatch(UnaryOp, ColumnElement)
def compute_up(t, s, **kwargs):
sym = t.symbol
return getattr(t, 'op', getattr(safuncs, sym, getattr(sa.func, sym)))(s)
@dispatch(Selection, sa.sql.ColumnElement)
def compute_up(expr, data, scope=None, **kwargs):
predicate = compute(
expr.predicate,
data,
post_compute=False,
return_type='native',
)
return compute(
expr,
{expr._child: data, expr.predicate: predicate},
return_type='native',
**kwargs
)
@dispatch(Selection, sa.sql.ColumnElement, ColumnElement)
def compute_up(expr, col, predicate, **kwargs):
return sa.select([col]).where(predicate)
@dispatch(Selection, Selectable)
def compute_up(expr, sel, scope=None, **kwargs):
return compute(
expr,
{
expr._child: sel,
expr.predicate: compute(
expr.predicate,
toolz.merge(
{
expr._child[col.name]: col
for col in getattr(sel, 'inner_columns', sel.columns)
},
scope,
),
optimize=False,
post_compute=False,
),
},
return_type='native',
**kwargs
)
@dispatch(Selection, Selectable, ColumnElement)
def compute_up(expr, tbl, predicate, scope=None, **kwargs):
try:
return tbl.where(predicate)
except AttributeError:
return select([tbl]).where(predicate)
@dispatch(Selection, Selectable, Selectable)
def compute_up(expr, tbl, predicate, **kwargs):
col, = unsafe_inner_columns(predicate)
return reconstruct_select(
unsafe_inner_columns(tbl),
tbl,
whereclause=unify_wheres((tbl, predicate)),
).where(col)
def select(s):
""" Permissive SQL select
Idempotent sa.select
Wraps input in a list if necessary
"""
if not isinstance(s, sa.sql.Select):
if not isinstance(s, (tuple, list)):
s = [s]
s = sa.select(s)
return s
table_names = ('table_%d' % i for i in itertools.count(1))
def name(sel):
""" Name of a selectable """
if hasattr(sel, 'name'):
return sel.name
if hasattr(sel, 'froms'):
if len(sel.froms) == 1:
return name(sel.froms[0])
return next(table_names)
@dispatch(Select, Select)
def _join_selectables(a, b, condition=None, **kwargs):
return a.join(b, condition, **kwargs)
@dispatch(Select, ClauseElement)
def _join_selectables(a, b, condition=None, **kwargs):
if len(a.froms) > 1:
raise MDNotImplementedError()
return a.replace_selectable(a.froms[0],
a.froms[0].join(b, condition, **kwargs))
@dispatch(ClauseElement, Select)
def _join_selectables(a, b, condition=None, **kwargs):
if len(b.froms) > 1:
raise MDNotImplementedError()
return b.replace_selectable(b.froms[0],
a.join(b.froms[0], condition, **kwargs))
@dispatch(ClauseElement, ClauseElement)
def _join_selectables(a, b, condition=None, **kwargs):
return a.join(b, condition, **kwargs)
_getname = attrgetter('name')
def _clean_join_name(opposite_side_colnames, suffix, c):
if c.name not in opposite_side_colnames:
return c
else:
return c.label(c.name + suffix)
@dispatch(Join, ClauseElement, ClauseElement)
def compute_up(t, lhs, rhs, **kwargs):
if isinstance(lhs, ColumnElement):
lhs = select(lhs)
if isinstance(rhs, ColumnElement):
rhs = select(rhs)
if name(lhs) == name(rhs):
left_suffix, right_suffix = t.suffixes
lhs = lhs.alias('%s%s' % (name(lhs), left_suffix))
rhs = rhs.alias('%s%s' % (name(rhs), right_suffix))
lhs = alias_it(lhs)
rhs = alias_it(rhs)
if isinstance(lhs, Select):
lhs = lhs.alias(next(aliases))
left_conds = [lhs.c.get(c) for c in listpack(t.on_left)]
else:
ldict = dict((c.name, c) for c in unsafe_inner_columns(lhs))
left_conds = [ldict.get(c) for c in listpack(t.on_left)]
if isinstance(rhs, Select):
rhs = rhs.alias(next(aliases))
right_conds = [rhs.c.get(c) for c in listpack(t.on_right)]
else:
rdict = dict((c.name, c) for c in unsafe_inner_columns(rhs))
right_conds = [rdict.get(c) for c in listpack(t.on_right)]
condition = reduce(and_, map(eq, left_conds, right_conds))
# Perform join
if t.how == 'inner':
join = _join_selectables(lhs, rhs, condition=condition)
main = lhs
elif t.how == 'left':
main, other = lhs, rhs
join = _join_selectables(lhs, rhs, condition=condition, isouter=True)
elif t.how == 'right':
join = _join_selectables(rhs, lhs, condition=condition, isouter=True)
main = rhs
else:
# http://stackoverflow.com/questions/20361017/sqlalchemy-full-outer-join
raise ValueError("SQLAlchemy doesn't support full outer Join")
"""
We now need to arrange the columns in the join to match the columns in
the expression. We care about order and don't want repeats
"""
if isinstance(join, Select):
def cols(x):
if isinstance(x, Select):
return list(x.inner_columns)
else:
return list(x.columns)
else:
cols = lambda x: list(x.columns)
main_cols = cols(main)
left_cols = cols(lhs)
left_names = set(map(_getname, left_cols))
right_cols = cols(rhs)
right_names = set(map(_getname, right_cols))
left_suffix, right_suffix = t.suffixes
fields = [
f.replace(left_suffix, '').replace(right_suffix, '') for f in t.fields
]
columns = [c for c in main_cols if c.name in t._on_left]
columns += [_clean_join_name(right_names, left_suffix, c)
for c in left_cols
if c.name in fields and c.name not in t._on_left]
columns += [_clean_join_name(left_names, right_suffix, c)
for c in right_cols
if c.name in fields and c.name not in t._on_right]
if isinstance(join, Select):
return join.with_only_columns(columns)
else:
return sa.select(columns, from_obj=join)
names = {
mean: 'avg'
}
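# The helper below rebuilds a Select while carrying over the clauses that a
# plain ``sa.select`` would drop (FROM objects, WHERE, DISTINCT, GROUP BY,
# HAVING, LIMIT, OFFSET and ORDER BY); any of them can be overridden through
# keyword arguments.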
def reconstruct_select(columns, original, **kwargs):
return sa.select(columns,
from_obj=kwargs.pop('from_obj',
getattr(original, '_from_obj', None)),
whereclause=kwargs.pop('whereclause',
getattr(original,
'_whereclause', None)),
bind=kwargs.pop('bind', original.bind),
distinct=kwargs.pop('distinct',
getattr(original,
'_distinct', False)),
group_by=kwargs.pop('group_by',
getattr(original,
'_group_by_clause', None)),
having=kwargs.pop('having',
getattr(original, '_having', None)),
limit=kwargs.pop('limit',
getattr(original, '_limit', None)),
offset=kwargs.pop('offset',
getattr(original, '_offset', None)),
order_by=kwargs.pop('order_by',
getattr(original,
'_order_by_clause', None)),
**kwargs)
@dispatch((nunique, Reduction), Select)
def compute_up(expr, data, **kwargs):
if expr.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
data = data.alias(name=next(aliases))
cols = list(unsafe_inner_columns(data))
d = dict((expr._child[c], cols[i])
for i, c in enumerate(expr._child.fields))
return select([
compute(expr, d, post_compute=False, return_type='native')
])
@dispatch(Distinct, ColumnElement)
def compute_up(t, s, **kwargs):
return s.distinct(*t.on).label(t._name)
@dispatch(Distinct, Select)
def compute_up(t, s, **kwargs):
return s.distinct(*t.on)
@dispatch(Distinct, Selectable)
def compute_up(t, s, **kwargs):
return select(s).distinct(*t.on)
@dispatch(Reduction, ClauseElement)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
try:
op = getattr(sa.sql.functions, t.symbol)
except AttributeError:
op = getattr(sa.sql.func, names.get(type(t), t.symbol))
return op(s).label(t._name)
prefixes = {
std: 'stddev',
var: 'var'
}
@dispatch((std, var), sql.elements.ColumnElement)
def compute_up(t, s, **kwargs):
measure = t.schema.measure
is_timedelta = isinstance(getattr(measure, 'ty', measure), TimeDelta)
if is_timedelta:
# part 1 of 2 to work around the fact that postgres does not have
# timedelta var or std: cast to a double which is seconds
s = sa.extract('epoch', s)
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
funcname = 'samp' if t.unbiased else 'pop'
full_funcname = '%s_%s' % (prefixes[type(t)], funcname)
ret = getattr(sa.func, full_funcname)(s)
if is_timedelta:
# part 2 of 2 to work around the fact that postgres does not have
# timedelta var or std: cast back from seconds by
# multiplying by a 1 second timedelta
ret = ret * datetime.timedelta(seconds=1)
return ret.label(t._name)
@dispatch(count, Selectable)
def compute_up(t, s, **kwargs):
return s.count()
@dispatch(count, sa.Table)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
try:
c = list(s.primary_key)[0]
except IndexError:
c = list(s.columns)[0]
return sa.func.count(c)
@dispatch(nelements, (Select, ClauseElement))
def compute_up(t, s, **kwargs):
return compute_up(t._child.count(), s)
@dispatch(count, Select)
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
al = next(aliases)
try:
s2 = s.alias(al)
col = list(s2.primary_key)[0]
except (KeyError, IndexError):
s2 = s.alias(al)
col = list(s2.columns)[0]
result = sa.func.count(col)
return select([list(unsafe_inner_columns(result))[0].label(t._name)])
@dispatch(nunique, (sa.sql.elements.Label, sa.Column))
def compute_up(t, s, **kwargs):
if t.axis != (0,):
raise ValueError('axis not equal to 0 not defined for SQL reductions')
return sa.func.count(s.distinct())
@dispatch(nunique, Selectable)
def compute_up(expr, data, **kwargs):
return select(data).distinct().alias(next(aliases)).count()
@dispatch(By, sa.Column)
def compute_up(expr, data, scope=None, **kwargs):
data = lower_column(data)
grouper = compute(
expr.grouper,
scope,
post_compute=False,
return_type='native',
**kwargs
)
app = expr.apply
reductions = [
compute(
val,
data,
post_compute=None,
return_type='native',
).label(name)
for val, name in zip(app.values, app.fields)
]
froms = list(unique(chain(get_all_froms(grouper),
concat(map(get_all_froms, reductions)))))
inner_cols = list(getattr(grouper, 'inner_columns', [grouper]))
grouper_cols = inner_cols[:]
inner_cols.extend(concat(
getattr(getattr(r, 'element', None), 'inner_columns', [r])
for r in reductions
))
wheres = unify_wheres([grouper] + reductions)
sel = unify_froms(sa.select(inner_cols, whereclause=wheres), froms)
return sel.group_by(*grouper_cols)
@dispatch(By, ClauseElement)
def compute_up(expr, data, **kwargs):
if not valid_grouper(expr.grouper):
raise TypeError("Grouper must have a non-nested record or one "
"dimensional collection datashape, "
"got %s of type %r with dshape %s" %
(expr.grouper, type(expr.grouper).__name__,
expr.grouper.dshape))
grouper = get_unsafe_inner_columns(
compute(
expr.grouper,
data,
post_compute=False,
return_type='native',
),
)
app = expr.apply
reductions = [
compute(
val,
data,
post_compute=False,
return_type='native',
).label(name)
for val, name in zip(app.values, app.fields)
]
return sa.select(grouper + reductions).group_by(*grouper)
def lower_column(col):
""" Return column from lower level tables if possible
>>> metadata = sa.MetaData()
>>> s = sa.Table('accounts', metadata,
... sa.Column('name', sa.String),
... sa.Column('amount', sa.Integer),
... sa.Column('id', sa.Integer, primary_key=True),
... )
>>> s2 = select([s])
>>> s2.c.amount is s.c.amount
False
>>> lower_column(s2.c.amount) is s.c.amount
True
>>> lower_column(s2.c.amount)
Column('amount', Integer(), table=<accounts>)
"""
old = None
while col is not None and col is not old:
old = col
if not hasattr(col, 'table') or not hasattr(col.table, 'froms'):
return col
for f in col.table.froms:
if f.corresponding_column(col) is not None:
col = f.corresponding_column(col)
return old
aliases = ('alias_%d' % i for i in itertools.count(1))
@toolz.memoize
def alias_it(s):
""" Alias a Selectable if it has a group by clause """
if (hasattr(s, '_group_by_clause') and
s._group_by_clause is not None and
len(s._group_by_clause)):
return s.alias(next(aliases))
else:
return s
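# NOTE (editor's sketch, not part of the original module): a hedged usage
# illustration of ``alias_it``. It assumes only the SQLAlchemy API already
# imported above as ``sa``; the table and the helper name below are made up.
def _example_alias_it_usage():
    metadata = sa.MetaData()
    events = sa.Table('events', metadata,
                      sa.Column('user', sa.String),
                      sa.Column('n', sa.Integer))
    plain = sa.select([events.c.user, events.c.n])
    grouped = sa.select([events.c.user]).group_by(events.c.user)
    # a plain select comes back unchanged; a grouped select gets wrapped in an
    # alias so clauses added later do not leak into the grouped query
    return alias_it(plain) is plain, alias_it(grouped) is not grouped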
def is_nested_record(measure):
"""Predicate for checking whether `measure` is a nested ``Record`` dshape
Examples
--------
>>> from datashape import dshape
>>> is_nested_record(dshape('{a: int32, b: int32}').measure)
False
>>> is_nested_record(dshape('{a: var * ?float64, b: ?string}').measure)
True
"""
if not isrecord(measure):
raise TypeError('Input must be a Record type got %s of type %r' %
(measure, type(measure).__name__))
return not all(isscalar(getattr(t, 'key', t)) for t in measure.types)
def valid_grouper(expr):
ds = expr.dshape
measure = ds.measure
return (iscollection(ds) and
(isscalar(getattr(measure, 'key', measure)) or
(isrecord(measure) and not is_nested_record(measure))))
def valid_reducer(expr):
ds = expr.dshape
measure = ds.measure
return (not iscollection(ds) and
(isscalar(measure) or
(isrecord(measure) and not is_nested_record(measure))))
@dispatch(By, Select)
def compute_up(expr, data, **kwargs):
if not valid_grouper(expr.grouper):
raise TypeError("Grouper must have a non-nested record or one "
"dimensional collection datashape, "
"got %s of type %r with dshape %s" %
(expr.grouper, type(expr.grouper).__name__,
expr.grouper.dshape))
s = alias_it(data)
if valid_reducer(expr.apply):
reduction = compute(
expr.apply,
s,
post_compute=False, return_type='native',
)
else:
raise TypeError('apply must be a Summary expression')
grouper = get_unsafe_inner_columns(compute(
expr.grouper,
s,
post_compute=False,
return_type='native',
))
reduction_columns = pipe(reduction.inner_columns,
map(get_unsafe_inner_columns),
concat)
columns = list(unique(chain(grouper, reduction_columns)))
if (not isinstance(s, sa.sql.selectable.Alias) or
(hasattr(s, 'froms') and isinstance(s.froms[0],
sa.sql.selectable.Join))):
assert len(s.froms) == 1, 'only a single FROM clause supported for now'
from_obj, = s.froms
else:
from_obj = None
return reconstruct_select(columns,
getattr(s, 'element', s),
from_obj=from_obj,
group_by=grouper)
@dispatch(Sort, (Selectable, Select))
def compute_up(t, s, **kwargs):
s = select(s.alias())
direction = sa.asc if t.ascending else sa.desc
cols = [direction(lower_column(s.c[c])) for c in listpack(t.key)]
return s.order_by(*cols)
@dispatch(Sort, (sa.Table, ColumnElement))
def compute_up(t, s, **kwargs):
s = select(s)
direction = sa.asc if t.ascending else sa.desc
cols = [direction(lower_column(s.c[c])) for c in listpack(t.key)]
return s.order_by(*cols)
def _samp_compute_up(t, s, **kwargs):
if t.n is not None:
limit = t.n
else:
limit = sa.select([safuncs.count() * t.frac],
from_obj=s.alias()).as_scalar()
return s.order_by(safuncs.random()).limit(limit)
@dispatch(Sample, sa.Table)
def compute_up(t, s, **kwargs):
return _samp_compute_up(t, select(s), **kwargs)
@dispatch(Sample, ColumnElement)
def compute_up(t, s, **kwargs):
return _samp_compute_up(t, sa.select([s]), **kwargs)
@dispatch(Sample, FromClause)
def compute_up(t, s, **kwargs):
return _samp_compute_up(t, s, **kwargs)
@dispatch(Head, FromClause)
def compute_up(t, s, **kwargs):
if s._limit is not None and s._limit <= t.n:
return s
return s.limit(t.n)
@dispatch(Head, sa.Table)
def compute_up(t, s, **kwargs):
return s.select().limit(t.n)
@dispatch(Head, ColumnElement)
def compute_up(t, s, **kwargs):
return sa.select([s]).limit(t.n)
@dispatch(Head, ScalarSelect)
def compute_up(t, s, **kwargs):
return compute(t, s.element, post_compute=False, return_type='native')
@dispatch(Label, ColumnElement)
def compute_up(t, s, **kwargs):
return s.label(t.label)
@dispatch(Label, FromClause)
def compute_up(t, s, **kwargs):
assert len(s.c) == 1, \
'expected %s to have a single column but has %d' % (s, len(s.c))
inner_column, = s.inner_columns
return reconstruct_select([inner_column.label(t.label)], s)
@dispatch(Expr, ScalarSelect)
def post_compute(t, s, **kwargs):
return s.element
@dispatch(ReLabel, Selectable)
def compute_up(expr, data, **kwargs):
names = data.c.keys()
assert names == expr._child.fields, (
'names = %r\nexpr._child.fields = %r' % (names, expr._child.fields)
)
d = dict(zip(names, getattr(data, 'inner_columns', data.c)))
return reconstruct_select(
(d[col].label(new_col) if col != new_col else d[col]
for col, new_col in zip(expr._child.fields, expr.fields)),
data,
)
@dispatch(FromClause)
def get_unsafe_inner_columns(sel):
try:
return list(sel.inner_columns)
except AttributeError:
return list(map(lower_column, sel.c.values()))
@dispatch(ColumnElement)
def get_unsafe_inner_columns(c):
return [c]
@dispatch(ScalarSelect)
def get_unsafe_inner_columns(sel):
inner_columns = list(sel.inner_columns)
assert len(inner_columns) == 1, 'ScalarSelect should have only ONE column'
return list(map(lower_column, inner_columns))
@dispatch(sa.sql.functions.Function)
def get_unsafe_inner_columns(f):
unique_columns = unique(concat(map(get_unsafe_inner_columns, f.clauses)))
lowered = [x.label(getattr(x, 'name', None)) for x in unique_columns]
return [getattr(sa.func, f.name)(*lowered)]
@dispatch(sa.sql.elements.Label)
def get_unsafe_inner_columns(label):
"""
Notes
-----
This should only ever return a list of length 1
This is because we need to turn ScalarSelects into an actual column
"""
name = label.name
inner_columns = get_unsafe_inner_columns(label.element)
assert len(inner_columns) == 1
return [lower_column(c).label(name) for c in inner_columns]
@dispatch(base, Expr)
def get_unsafe_inner_columns(b, expr):
return [sa.literal(b, dshape_to_alchemy(expr.dshape)).label(expr._name)]
@dispatch(object, Expr)
def get_unsafe_inner_columns(ob, expr):
return get_unsafe_inner_columns(ob)
@dispatch(Select)
def get_all_froms(sel):
return list(unique(sel.locate_all_froms()))
@dispatch(sa.Table)
def get_all_froms(t):
return [t]
@dispatch(sa.sql.elements.ColumnElement)
def get_all_froms(colelement):
return list(unique(concat(map(get_all_froms, colelement.get_children()))))
@dispatch((ScalarSelect, sa.sql.elements.Label))
def get_all_froms(element):
return get_all_froms(element.element)
@dispatch(sa.sql.functions.FunctionElement)
def get_all_froms(function):
return list(unique(concat(map(get_all_froms, function.clauses.clauses))))
@dispatch(ColumnClause)
def get_all_froms(c):
return [c.table]
@dispatch(base)
def get_all_froms(b):
return []
def get_clause(data, kind):
# arg SQLAlchemy doesn't allow things like data._group_by_clause or None
assert kind == 'order_by' or kind == 'group_by', \
'kind must be "order_by" or "group_by"'
clause = getattr(data, '_%s_clause' % kind, None)
return clause.clauses if clause is not None else None
@dispatch(Merge, VarArgs[sa.sql.ClauseElement, base])
def compute_up(expr, args, **kwargs):
from_objs = list(unique(concat(map(get_all_froms, args))))
if len(from_objs) > 1:
# TODO: how do you do this in sql? please send help
raise ValueError('only columns from the same table can be merged')
cols = list(unique(concat(map(get_unsafe_inner_columns, args, expr.args))))
sel = sa.select(cols, from_obj=from_objs[0])
where = unify_wheres(args)
if where is not None:
sel = sel.where(where)
return sel
@dispatch(Summary, Select)
def compute_up(t, s, scope=None, **kwargs):
d = dict((t._child[c], list(unsafe_inner_columns(s))[i])
for i, c in enumerate(t._child.fields))
cols = [
compute(
val,
toolz.merge(scope, d),
post_compute=None,
return_type='native',
).label(name)
for name, val in zip(t.fields, t.values)
]
s = copy(s)
for c in cols:
s.append_column(c)
return s.with_only_columns(cols)
@dispatch(Summary, ClauseElement)
def compute_up(t, s, **kwargs):
scope = {t._child: s}
return sa.select(
compute(
value,
scope,
post_compute=None,
return_type='native',
).label(name)
for value, name in zip(t.values, t.fields)
)
@dispatch(Like, Select)
def compute_up(t, s, **kwargs):
assert len(s.c) == 1, \
'Select cannot have more than a single column when filtering with `like`'
return compute_up(t, first(s.inner_columns), **kwargs)
@dispatch(Like, ColumnElement)
def compute_up(t, s, **kwargs):
return s.like(t.pattern.replace('*', '%').replace('?', '_'))
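# NOTE (editor's sketch, not in the original source): the glob-to-LIKE rewrite
# above is a plain string substitution. The helper name below is hypothetical
# and only spells out the mapping used by ``compute_up``.
def _glob_to_like(pattern):
    """Translate '*' -> '%' and '?' -> '_' exactly as done above.
    >>> _glob_to_like('Alex*')
    'Alex%'
    >>> _glob_to_like('M?ller')
    'M_ller'
    """
    return pattern.replace('*', '%').replace('?', '_')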
# TODO: remove if the alternative fix goes into PyHive
@compiles(sa.sql.functions.Function, 'hive')
def compile_char_length_on_hive(element, compiler, **kwargs):
assert len(element.clauses) == 1, \
'char_length must have a single clause, got %s' % list(element.clauses)
if element.name == 'char_length':
return compiler.visit_function(sa.func.length(*element.clauses),
**kwargs)
return compiler.visit_function(element, **kwargs)
@dispatch(str_len, ColumnElement)
def compute_up(expr, data, **kwargs):
return sa.sql.functions.char_length(data).label(expr._name)
@dispatch(StrSlice, ColumnElement)
def compute_up(expr, data, **kwargs):
if isinstance(expr.slice, _inttypes):
idx = expr.slice + 1
if idx < 1: # SQL string indexing is 1-based and positive.
msg = "Index {} out-of-bounds for SQL string indexing."
raise IndexError(msg.format(expr.slice))
args = idx, 1
elif isinstance(expr.slice, tuple):
start, stop, step = expr.slice
if step is not None:
msg = "step value {} not valid for SQL string indexing."
raise ValueError(msg.format(step))
norm_start = start if isinstance(start, _inttypes) else 0
if norm_start < 0:
msg = "Negative indexing not valid for SQL strings; given {}."
raise ValueError(msg.format(norm_start))
if isinstance(stop, _inttypes):
if stop < 0:
msg = "Negative indexing not valid for SQL strings; given {}."
raise ValueError(msg.format(stop))
args = norm_start + 1, (stop - norm_start)
elif stop is None:
args = norm_start + 1,
return sa.sql.func.substring(data, *args)
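# NOTE (editor's sketch, hypothetical helper, not in the original source): SQL
# ``substring`` is 1-based and takes (start, length) while Python slices are
# 0-based and half-open, so the code above maps a slice [start:stop] onto
# (start + 1, stop - start), dropping the length when the slice is open-ended.
def _slice_to_substring_args(start, stop):
    """
    >>> _slice_to_substring_args(0, 3)    # s[0:3] -> substring(s, 1, 3)
    (1, 3)
    >>> _slice_to_substring_args(2, None) # s[2:]  -> substring(s, 3)
    (3,)
    """
    if stop is None:
        return (start + 1,)
    return (start + 1, stop - start)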
@dispatch(StrFind, ColumnElement)
def compute_up(expr, data, **kwargs):
sub = sa.sql.expression.literal(expr.sub)
return sa.sql.func.position(sub.op('in')(data))
@compute_up.register(StrCat, Select, basestring)
@compute_up.register(StrCat, basestring, Select)
def str_cat_sql(expr, lhs, rhs, **kwargs):
if isinstance(lhs, Select):
orig = lhs
lhs = first(lhs.inner_columns)
else:
orig = rhs
rhs = first(rhs.inner_columns)
if expr.sep:
result = (lhs + expr.sep + rhs).label(expr.lhs._name)
else:
result = (lhs + rhs).label(expr.lhs._name)
return reconstruct_select([result], orig)
@compute_up.register(StrCat, Select, Select)
def str_cat_sql(expr, lhs, rhs, **kwargs):
left, right = first(lhs.inner_columns), first(rhs.inner_columns)
if expr.sep:
result = (left + expr.sep + right).label(expr.lhs._name)
else:
result = (left + right).label(expr.lhs._name)
return reconstruct_select([result], lhs)
@compute_up.register(StrCat, (ColumnElement, basestring), ColumnElement)
@compute_up.register(StrCat, ColumnElement, basestring)
def str_cat_sql(expr, lhs, rhs, **kwargs):
if expr.sep:
return (lhs + expr.sep + rhs).label(expr.lhs._name)
else:
return (lhs + rhs).label(expr.lhs._name)
@dispatch(UnaryStringFunction, ColumnElement)
def compute_up(expr, data, **kwargs):
func_name = type(expr).__name__
return getattr(sa.sql.func, func_name)(data).label(expr._name)
@dispatch(notnull, ColumnElement)
def compute_up(expr, data, **kwargs):
return data != None
@toolz.memoize
def table_of_metadata(metadata, name):
if metadata.schema is not None:
name = '.'.join((metadata.schema, name))
if name not in metadata.tables:
metadata.reflect(views=metadata.bind.dialect.supports_views)
return metadata.tables[name]
def table_of_engine(engine, name):
metadata = metadata_of_engine(engine)
return table_of_metadata(metadata, name)
@dispatch(Field, sa.engine.Engine)
def compute_up(expr, data, **kwargs):
return table_of_engine(data, expr._name)
@dispatch(DateTime, ColumnElement)
def compute_up(expr, data, **kwargs):
attr = expr.attr
if attr == 'date':
return sa.func.date(data).label(expr._name)
elif attr == 'dayofyear':
attr = 'doy'
return sa.extract(attr, data).cast(dshape_to_alchemy(expr.schema)).label(
expr._name,
)
@dispatch(dayofweek, ColumnElement)
def compute_up(expr, data, **kwargs):
# ``datetime.datetime.weekday()`` and ``pandas.Timestamp.dayofweek`` use
# monday=0 but sql uses sunday=0. We add 6 (7 - 1) to subtract 1 in Z7
# which will align on monday=0.
# We cannot use ``(extract(dow from data) - 1) % 7`` because in sql
# ``-1 % 7 = -1``, so instead we roll forward by 6 which is functionally
# equivalent.
# We also need to cast the result of the extract to a small integer because
# postgres returns a double precision for the result.
# January 2014
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
    # 26 27 28 29 30 31
# 2014-01-05 is a Sunday
# bz=# select extract('dow' from '2014-01-05'::timestamp);
# date_part
# -----------
# 0
# (1 row)
#
# In [1]: pd.Timestamp('2014-01-05').dayofweek
# Out[1]: 6
#
# bz=# select extract('dow' from '2014-01-05'::timestamp)::smallint + 6 % 7;
# ?column?
# ----------
# 6
# (1 row)
return ((sa.extract('dow', data).cast(sa.SmallInteger) + 6) % 7).label(
expr._name,
)
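# NOTE (editor's check, not part of the original source): the shift above can
# be verified without a database. Assuming SQL ``extract('dow', ...)`` yields
# Sunday=0 .. Saturday=6, adding 6 modulo 7 reproduces the Monday=0 .. Sunday=6
# convention of ``datetime.weekday()`` and pandas; the helper name is made up.
def _check_dow_shift():
    """
    >>> _check_dow_shift()  # Su, Mo, Tu, We, Th, Fr, Sa
    [6, 0, 1, 2, 3, 4, 5]
    """
    return [(sql_dow + 6) % 7 for sql_dow in range(7)]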
@dispatch(DateTimeTruncate, ColumnElement)
def compute_up(expr, data, **kwargs):
return sa.func.date_trunc(expr.unit, data).label(expr._name)
@compiles(sa.sql.elements.Extract, 'hive')
def hive_extract_to_date_function(element, compiler, **kwargs):
func = getattr(sa.func, element.field)(element.expr)
return compiler.visit_function(func, **kwargs)
@compiles(sa.sql.elements.Extract, 'mssql')
def mssql_extract_to_datepart(element, compiler, **kwargs):
func = sa.func.datepart(sa.sql.expression.column(element.field),
element.expr)
return compiler.visit_function(func, **kwargs)
def engine_of(x):
if isinstance(x, Engine):
return x
if isinstance(x, MetaData):
return x.bind
if isinstance(x, Table):
return x.metadata.bind
raise NotImplementedError("Can't deterimine engine of %s" % x)
@dispatch(object)
def _subexpr_optimize(expr):
return expr
@dispatch(Expr)
def _subexpr_optimize(expr):
return type(expr)(*map(_subexpr_optimize, expr._args))
timedelta_ns = TimeDelta(unit='ns')
@dispatch(reductions.any)
def _subexpr_optimize(expr):
if expr.axis != tuple(range(expr._child.ndim)):
raise ValueError("Cannot perform 'any' over an axis: %s")
if expr.keepdims:
raise ValueError("Cannot perform 'any' with keepdims=True")
return expr._child.coerce(Option(int32)
if isinstance(expr._child.dshape, Option) else
int32).sum() != 0
@dispatch(reductions.all)
def _subexpr_optimize(expr):
if expr.axis != tuple(range(expr._child.ndim)):
raise ValueError("Cannot perform 'all' over an axis: %s")
if expr.keepdims:
raise ValueError("Cannot perform 'all' with keepdims=True")
return (~expr._child).coerce(Option(int32)
if isinstance(expr._child.dshape, Option) else
int32).sum() == 0
@dispatch(Sub)
def _subexpr_optimize(expr):
new_expr = type(expr)(*map(_subexpr_optimize, expr._args))
schema = expr.schema
# we have a timedelta shaped expression; sql timedeltas are in `ns` units
    # so we should coerce this expression over
if isinstance(schema, TimeDelta) and schema.unit != 'ns':
new_expr = new_expr.coerce(timedelta_ns)
return new_expr
@dispatch(Tail)
def _subexpr_optimize(expr):
child = sorter = expr._child
while not isinstance(sorter, Sort):
try:
sorter = sorter._child
except AttributeError:
break
else:
# Invert the sort order, then take the head, then re-sort based on
# the original key.
return child._subs({
sorter: sorter._child.sort(
sorter._key,
ascending=not sorter.ascending,
),
}).head(expr.n).sort(sorter._key, ascending=sorter.ascending)
# If there is no sort order, then we can swap out a head with a tail.
# This is equivalent in this backend and considerably faster.
warnings.warn(
"'tail' of a sql operation with no sort is the same as 'head'",
)
return child.head(expr.n)
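# NOTE (editor's sketch, not part of the original source): the rewrite above
# relies on the identity "tail(n) of an ascending sort == ascending re-sort of
# head(n) of the descending sort". A pure-Python rendering of that identity,
# with a hypothetical helper name and made-up data:
def _tail_via_inverted_sort(xs, n):
    """
    >>> xs = [5, 1, 4, 2, 3]
    >>> sorted(xs)[-2:] == _tail_via_inverted_sort(xs, 2)
    True
    """
    return sorted(sorted(xs, reverse=True)[:n])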
@dispatch(Expr, ClauseElement)
def optimize(expr, _):
collected = broadcast_collect(expr, no_recurse=Selection)
return reduce(
lambda expr, term: expr._subs({term: _subexpr_optimize(term)}),
collected._subterms(),
collected,
)
@dispatch(Field, sa.MetaData)
def compute_up(expr, data, **kwargs):
return table_of_metadata(data, expr._name)
@dispatch(Expr, ClauseElement)
def post_compute(_, s, **kwargs):
return select(s)
@dispatch((Iterable, Selectable))
def _coerce_op_input(i):
"""Make input to SQLAlchemy operator an amenable type.
Parameters
----------
i : (Iterable, Selectable)
The iterable or selectable to coerce.
Returns
-------
coerced_input : (Iterable, Selectable)
The iterable or selectable passed to the function.
These types are already the amenable types.
"""
return i
@dispatch(ColumnElement)
def _coerce_op_input(i):
"""Make input to SQLAlchemy operator an amenable type.
Parameters
----------
i : ColumnElement
The column element to coerce.
Returns
-------
coerced_input : sa.selectable.Select
A select wrapping the column element.
"""
return select(i)
@dispatch(IsIn, ColumnElement, (Iterable, Selectable, ColumnElement))
def compute_up(expr, data, keys, **kwargs):
return data.in_(_coerce_op_input(keys))
@dispatch(IsIn, Selectable, (Iterable, Selectable, ColumnElement))
def compute_up(expr, data, keys, **kwargs):
assert len(data.columns) == 1, (
'only 1 column is allowed in a Select in IsIn'
)
col, = unsafe_inner_columns(data)
return reconstruct_select((col.in_(_coerce_op_input(keys)),), data)
@dispatch(Slice, (Select, Selectable, ColumnElement))
def compute_up(expr, data, **kwargs):
index = expr.index[0] # [0] replace_slices returns tuple ((start, stop), )
if isinstance(index, slice):
start = index.start or 0
if start < 0:
raise ValueError('start value of slice cannot be negative'
' with a SQL backend')
stop = index.stop
if stop is not None and stop < 0:
raise ValueError('stop value of slice cannot be negative with a '
'SQL backend.')
if index.step is not None and index.step != 1:
raise ValueError('step parameter in slice objects not supported '
'with SQL backend')
elif isinstance(index, (np.integer, numbers.Integral)):
if index < 0:
raise ValueError('integer slice cannot be negative for the'
' SQL backend')
start = index
stop = start + 1
else:
        raise TypeError('type %r not supported for slicing with SQL backend'
% type(index).__name__)
warnings.warn('The order of the result set from a Slice expression '
'computed against the SQL backend is not deterministic.')
if stop is None: # Represents open-ended slice. e.g. [3:]
return select(data).offset(start)
else:
return select(data).offset(start).limit(stop - start)
@dispatch(Coerce, ColumnElement)
def compute_up(expr, data, **kwargs):
return sa.cast(data, dshape_to_alchemy(expr.to)).label(expr._name)
@dispatch(Coerce, Select)
def compute_up(expr, data, **kwargs):
column = first(data.inner_columns)
cast = sa.cast(column, dshape_to_alchemy(expr.to)).label(expr._name)
return reconstruct_select([cast], data)
@dispatch(Shift, ColumnElement)
def compute_up(expr, data, **kwargs):
return sa.func.lag(data, expr.n).over().label(expr._name)
@compute_up.register(Coalesce, (ColumnElement, base), ColumnElement)
@compute_up.register(Coalesce, ColumnElement, base)
def coalesce_sql(t, lhs, rhs, **kwargs):
return sa.sql.functions.coalesce(lhs, rhs).label(t._name)
@compute_up.register(Coalesce, Select, base)
@compute_up.register(Coalesce, base, Select)
def coalesce_sql_select(expr, lhs, rhs, **kwargs):
if isinstance(lhs, Select):
orig = lhs
lhs = first(lhs.inner_columns)
else:
orig = rhs
rhs = first(rhs.inner_columns)
result = sa.sql.functions.coalesce(lhs, rhs).label(expr._name)
return reconstruct_select([result], orig)
@dispatch(Coalesce, (Select, ColumnElement))
def compute_up(expr, data, **kwargs):
if isinstance(expr.lhs, Expr):
lhs = data
rhs = expr.rhs
else:
lhs = expr.lhs
        rhs = data
return compute_up(expr, lhs, rhs, **kwargs)
| bsd-3-clause |
sbg2133/miscellaneous_projects | carina/planck_lic.py | 1 | 4381 | from getIQU import IQU
from subprocess import call
import sys, os
import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Gaussian2DKernel
from astropy.io import fits
from astropy.wcs import WCS
import scipy.ndimage
from skimage import filters
plt.ion()
stokes = ['I', 'Q', 'U']
planck_dir = './carinaData/planckData'
filename = os.path.join(planck_dir, "planck_353_carinaneb_pol.fits")
hdulist = fits.open(filename)
for s, param in enumerate(stokes):
if (stokes[s] == 'I'):
I = hdulist[s+1].data
I[I == 0.0] = np.nan
#wcs = WCS(hdulist[s+1].header)
for s, param in enumerate(stokes):
if (stokes[s] == 'Q'):
Q = hdulist[s+1].data
Q[Q == 0.0] = np.nan
#wcs = WCS(hdulist[s+1].header)
for s, param in enumerate(stokes):
if (stokes[s] == 'U'):
U = hdulist[s+1].data
U[U == 0.0] = np.nan
wcs = WCS(hdulist[s+1].header)
I = I[30:-30,260:-260]
Q = Q[30:-30,260:-260]
U = U[30:-30,260:-260]
Pvals = np.sqrt(Q**2 + U**2)
pvals = Pvals/I
# Correct pvals as in Jamil's thesis, 5.7
#pvals[pvals > 0.5] = np.nan
phi = 0.5*np.arctan2(U,Q)
dx = np.cos(phi)
dy = np.sin(phi)
mag = np.sqrt(dx**2 + dy**2)
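# NOTE (editor's check, not part of the original script): Pvals above is the
# polarized intensity sqrt(Q**2 + U**2), pvals the polarization fraction P/I
# and phi the polarization angle 0.5*arctan2(U, Q). The scalar Stokes values
# below are made up and only sanity-check those definitions.
_I0, _Q0, _U0 = 2.0, 1.0, 0.0
assert np.isclose(np.sqrt(_Q0**2 + _U0**2), 1.0)        # P = 1
assert np.isclose(np.sqrt(_Q0**2 + _U0**2) / _I0, 0.5)  # p = P/I = 0.5
assert np.isclose(0.5 * np.arctan2(_U0, _Q0), 0.0)      # phi = 0 -> (dx, dy) = (1, 0)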
X = np.linspace(0, I.shape[1], I.shape[1])
Y = np.linspace(0, I.shape[0], I.shape[0])
print "Y =", I.shape[1]
print "X =", I.shape[0]
xs, ys = np.meshgrid(X,Y)
"""
plt.figure()
nskip = 2
skip = (slice(None, None, nskip), slice(None, None, nskip))
#f = aplpy.FITSFigure(I, figsize = (10.24,7.68), dpi = 100)
ax = plt.gca()
ax.imshow(I, cmap = "gist_heat")
#f.tick_labels.set_font(size='small')
#f.show_colorscale(cmap='gist_heat')
# Add polarization vectors
ax.quiver(xs[skip],ys[skip],(dx/mag)[skip],(dy/mag)[skip], color = "white", angles = 'xy', units = 'xy', scale_units = 'xy', scale = 0.3)
#f.show_vectors(pvals, phi, color = 'white', rotate = 90., scale = 50, step = 10)
ax.set_facecolor('black')
plt.tight_layout()
"""
xsize, ysize = len(X), len(Y)
vectors = np.array([dx,dy])
#white = np.random.rand(xsize, ysize)
#white = np.random.uniform(low = 0., high = 1., size = (xsize, ysize))
white = np.random.normal(0., 1., size = (xsize,ysize))
#white = scipy.ndimage.gaussian_filter(white, sigma)
with file('texture.dat', 'w') as outfile:
for row in white:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with file('dx.dat', 'w') as outfile:
for row in dx:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with file('dy.dat', 'w') as outfile:
for row in dy:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
command = ["./carina_lic", str(ysize), str(xsize)]
call(command)
lic = np.loadtxt("./lic.dat")
#np.save('lic.npy', lic)
lic = np.transpose(lic)
#lic = np.load("lic.npy")
lic += np.abs(np.nanmin(lic))
lic[lic > 3*np.nanstd(lic)] *= 100*lic[lic > 3*np.nanstd(lic)]
mult = lic * I
"""
blur_size = 8
unsharp_strength = 0.8
blurred = filter.gaussian_filter(lic, blur_size)
highpass = lic - unsharp_strength*blurred
sharp = lic + highpass
lowpass = scipy.ndimage.gaussian_filter(lic, 5)
highpass = lic - lowpass
highpass += lic
"""
fig1 = plt.figure(figsize=(10.24, 7.68), dpi = 100)
ax = fig1.add_subplot(1, 1, 1, projection=wcs)
ax.set_facecolor("k")
ax.imshow(lic, cmap = "inferno", interpolation = "hanning")
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.set_xlabel('RA', fontsize = 16, fontweight = 'bold')
ax.set_ylabel('DEC', fontsize = 16, fontweight = 'bold')
plt.tight_layout()
fig2 = plt.figure(figsize=(10.24, 7.68), dpi = 100)
ax = fig2.add_subplot(1, 1, 1, projection=wcs)
plt.imshow(I, cmap = "inferno", alpha = 1)
plt.imshow(lic, cmap = "gray", alpha = 0.30, interpolation = "hanning")
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.set_xlabel('RA', fontsize = 16, fontweight = 'bold')
ax.set_ylabel('DEC', fontsize = 16, fontweight = 'bold')
ax.set_facecolor("k")
plt.tight_layout()
fig3 = plt.figure(figsize=(10.24, 7.68), dpi = 100)
ax = fig3.add_subplot(1, 1, 1, projection=wcs)
plt.imshow(mult, cmap = "inferno", vmin = 0, vmax = 100.)
ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.set_xlabel('RA', fontsize = 16, fontweight = 'bold')
ax.set_ylabel('DEC', fontsize = 16, fontweight = 'bold')
ax.set_facecolor("k")
plt.tight_layout()
| gpl-3.0 |
shipci/sympy | examples/intermediate/mplot3d.py | 14 | 1261 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
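# NOTE (editor's sketch, not part of the original test module): a minimal
# usage illustration of the function under test; it relies only on the
# ``sample_without_replacement(n_population, n_samples, random_state=...)``
# signature imported above, and the helper name is made up.
def _example_sample_without_replacement():
    s = sample_without_replacement(100, 10, random_state=0)
    # ten distinct integers, each drawn from range(100)
    return len(s) == 10 and np.unique(s).size == 10 and np.all(s < 100)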
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
bellwethers-in-se/defects | src/old/Test Oracle/Prediction.py | 1 | 1996 | from __future__ import division
from sklearn.ensemble import RandomForestClassifier
from methods1 import *
from smote import *
def formatData(tbl):
""" Convert Tbl to Pandas DataFrame
:param tbl: Thing object created using function createTbl
:returns table in a DataFrame format
"""
Rows = [i.cells for i in tbl._rows]
headers = [i.name for i in tbl.headers]
return pd.DataFrame(Rows, columns=headers)
def Bugs(tbl):
cells = [i.cells[-2] for i in tbl._rows]
return cells
def rforest(train, test, tunings=None):
""" Random Forest
:param train: Thing object created using function createTbl
:param test: Thing object created using function createTbl
:param tunings: List of tunings obtained from Differential Evolution
tunings=[n_estimators, max_features, min_samples_leaf, min_samples_split]
:return preds: Predicted bugs
"""
assert type(train) is Thing, "Train is not a Thing object"
assert type(test) is Thing, "Test is not a Thing object"
train = SMOTE(train, atleast=50, atmost=101, resample=True)
if not tunings:
clf = RandomForestClassifier(n_estimators=100, random_state=1)
else:
clf = RandomForestClassifier(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3]))
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
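# NOTE (editor's sketch, hypothetical helper and values, not part of the
# original module): the ``tunings`` list consumed by ``rforest`` above is
# positional and its second entry is a percentage, mirrored here explicitly.
def _tunings_to_kwargs(tunings):
    """
    >>> _tunings_to_kwargs([80, 50, 2, 4]) == {'n_estimators': 80,
    ... 'max_features': 0.5, 'min_samples_leaf': 2, 'min_samples_split': 4}
    True
    """
    return {'n_estimators': int(tunings[0]),
            'max_features': tunings[1] / 100,
            'min_samples_leaf': int(tunings[2]),
            'min_samples_split': int(tunings[3])}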
def _RF():
"Test RF"
dir = 'Data/Jureczko'
one, two = explore(dir)
train, test = createTbl(one[0]), createTbl(two[0])
actual = Bugs(test)
predicted = rforest(train, test)
set_trace()
if __name__ == '__main__':
_RF()
| mit |
grhawk/ASE | ase/io/png.py | 4 | 1758 | from ase.io.eps import EPS
class PNG(EPS):
def write_header(self):
from matplotlib.backends.backend_agg import RendererAgg
try:
from matplotlib.transforms import Value
except ImportError:
dpi = 72
else:
dpi = Value(72)
self.renderer = RendererAgg(self.w, self.h, dpi)
#self.gc = GraphicsContextBase()
#self.gc.set_linewidth(2)
def write_trailer(self):
renderer = self.renderer
if hasattr(renderer._renderer, 'write_png'):
# Old version of matplotlib:
renderer._renderer.write_png(self.filename)
else:
from matplotlib import _png
# buffer_rgba does not accept arguments from version 1.2.0
# https://github.com/matplotlib/matplotlib/commit/f4fee350f9fbc639853bee76472d8089a10b40bd
import matplotlib
if matplotlib.__version__ < '1.2.0':
x = renderer._renderer.buffer_rgba(0, 0)
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
self.filename, 72)
else:
x = renderer._renderer.buffer_rgba()
_png.write_png(renderer._renderer.buffer_rgba(),
renderer.width, renderer.height,
self.filename, 72)
def write_png(filename, atoms, **parameters):
if isinstance(atoms, list):
if len(atoms) > 1:
raise RuntimeError("Don't know how to save more than "+
"one image to PNG image!")
else:
atoms = atoms[0]
PNG(atoms, **parameters).write(filename)
| gpl-2.0 |
e-koch/VLA_Lband | 14B-088/HI/imaging/imaging_tests/HI_testing_analysis.py | 1 | 2781 |
'''
Split from HI_testing_comparison because I didn't feel like getting statsmodels
to play nice within CASA.
'''
import os
import pandas as pd
import statsmodels.formula.api as sm
data_path = os.path.expanduser("~/MyRAID/M33/14B-088/HI/channel_testing")
data = pd.read_csv(os.path.join(data_path, "property_values.csv"))
# So my path to CASA 4.7 was wrong :) They all failed.
data = data[data["CASAVer"] != 470]
# Drop any other NaNs
data = data.dropna()
# Let's model some stuff! Treat the different CASA versions as random mixed
# effect
data["CASAVer"][data["CASAVer"] == 440] = 0
data["CASAVer"][data["CASAVer"] == 453] = 1
data["CASAVer"][data["CASAVer"] == 460] = 2
# data["CASAVer"][data["CASAVer"] == 470] = 3
# Create a version without any diverging cleans
good_data = data[data["peak_res"] < 0.01]
# Sum
sum_model = sm.mixedlm("sum ~ Tclean*AllFields*MScale*Mask*Model", data=data,
groups=data["CASAVer"]).fit(reml=False)
print(sum_model.summary())
# Can't use Tclean. Makes matrix singular.
sum_model_good = sm.mixedlm("sum ~ AllFields*MScale*Mask*Model", data=good_data,
groups=good_data["CASAVer"]).fit(reml=False)
print(sum_model_good.summary())
# Dominated by model (duh)
# Median
median_model = \
sm.mixedlm("median ~ Tclean*AllFields*MScale*Mask*Model", data=data,
groups=data["CASAVer"]).fit(reml=False)
print(median_model.summary())
# Can't use Tclean. Makes matrix singular.
median_model_good = \
sm.mixedlm("median ~ AllFields*MScale*Mask*Model", data=good_data,
groups=good_data["CASAVer"]).fit(reml=False)
print(median_model_good.summary())
# Dominated by inclusion of model (duh). Some interaction with AllFields
# Std
std_model = \
sm.mixedlm("std ~ Tclean*AllFields*MScale*Mask*Model", data=data,
groups=data["CASAVer"]).fit(reml=False)
print(std_model.summary())
# Can't use Tclean. Makes matrix singular.
std_model_good = \
sm.mixedlm("std ~ AllFields*MScale*Mask*Model", data=good_data,
groups=good_data["CASAVer"]).fit(reml=False)
print(std_model_good.summary())
# High significance of model
# Peak Residual
peakres_model = \
sm.mixedlm("peak_res ~ Tclean*AllFields*MScale*Mask*Model", data=data,
groups=data["CASAVer"]).fit(reml=False)
print(peakres_model.summary())
# Can't use Tclean. Makes matrix singular.
peakres_model_good = \
sm.mixedlm("peak_res ~ AllFields*MScale*Mask*Model", data=good_data,
groups=good_data["CASAVer"]).fit(reml=False)
print(peakres_model_good.summary())
# High significance of Mscale. Model also somewhat significant
# Interaction between the two also significant
# In all cases, the version of CASA used makes no difference. Excellent!
| mit |
shahankhatch/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
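# NOTE (editor's check, not part of the original example): nudge_dataset
# returns the original images plus four 1-pixel shifted copies, so an (n, 64)
# input comes back as (5 * n, 64) with labels repeated to match. The toy
# arrays below are made up purely to illustrate that.
_X_toy = np.zeros((3, 64), dtype='float32')
_Y_toy = np.array([0, 1, 2])
_X_big, _Y_big = nudge_dataset(_X_toy, _Y_toy)
assert _X_big.shape == (15, 64) and _Y_big.shape == (15,)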
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
mromanello/CitationExtractor | citation_extractor/ned/matchers.py | 1 | 39974 | # -*- coding: utf-8 -*-
# author: Matteo Romanello, matteo.romanello@gmail.com, Matteo Filipponi
"""Contains various implementations of citation matchers."""
from __future__ import print_function
import os
import pickle
import sys
import pdb
import time
import logging
import multiprocessing
from operator import itemgetter
from collections import namedtuple
from nltk.metrics import edit_distance
from pyCTS import CTS_URN
from citation_extractor.extra.pysuffix.suffixIndexers import DictValuesIndexer
from citation_parser import CitationParser
from citation_extractor.ned import NIL_URN
from citation_extractor.Utils.strmatching import *
from citation_extractor.ned.features import FeatureExtractor
from citation_extractor.ned.ml import LinearSVMRank
from citation_extractor.ned.candidates import CandidatesGenerator
global logger
logger = logging.getLogger(__name__)
LOGGER = logger
# TODO: not sure about `scope`
Result = namedtuple('DisambiguationResult', 'mention, entity_type, scope, urn')
# TODO: could be moved to StringUtils (?)
def longest_common_substring(s1, s2):
"""
Taken from https://en.wikibooks.org/wiki/Algorithm_Implementation/\
Strings/Longest_common_substring#Python
"""
m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
longest, x_longest = 0, 0
for x in xrange(1, 1 + len(s1)):
for y in xrange(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
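# NOTE (editor's sketch, not part of the original module): a doctest-style
# usage illustration of the helper above; the strings and the helper name
# are made up.
def _lcs_example():
    """
    >>> longest_common_substring("Homerus", "Homeri Ilias")
    'Homer'
    >>> longest_common_substring("Verg.", "Ovid.")
    '.'
    """
    return longest_common_substring("Homerus", "Homeri Ilias")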
def select_lcs_match(citation_string, matches, n_guess=1):
"""TODO."""
# iterate and get what's the lowest ed_score
# then keep only the matches with lowest (best) score
# then keep the one with longest common string
lowest_score = 1000
for m in matches:
score = m[2]
if score < lowest_score:
lowest_score = score
filtered_matches = [m for m in matches if m[2] == lowest_score]
best_match = ("", None)
if (lowest_score > 0):
for match in filtered_matches:
lcs = longest_common_substring(match[1], citation_string)
if (len(lcs) > len(best_match[0])):
best_match = (lcs, match)
match = [best_match[1]]
logger.debug("Longest_common_substring selected %s out of %s" % (
match,
filtered_matches
))
else:
# TODO: use context here to disambiguate
match = matches[:n_guess]
return match
class CitationMatcher(object): # TODO: rename => FuzzyCitationMatcher
"""
TODO
docstring for CitationMatcher
"""
def __init__(
self,
knowledge_base=None,
fuzzy_matching_entities=False,
fuzzy_matching_relations=False,
min_distance_entities=1,
max_distance_entities=3,
distance_relations=3,
**kwargs
):
self.fuzzy_match_entities = fuzzy_matching_entities
self.fuzzy_match_relations = fuzzy_matching_relations
self.min_distance_entities = min_distance_entities if \
fuzzy_matching_entities else None
self.max_distance_entities = max_distance_entities if \
fuzzy_matching_entities else None
self.distance_relations = distance_relations if \
fuzzy_matching_relations else None
self._kb = knowledge_base
if 'author_names' in kwargs and 'work_titles' in kwargs \
and 'work_abbreviations' in kwargs and \
'author_abbreviations' in kwargs:
self._author_names = kwargs["author_names"]
self._author_abbreviations = kwargs["author_abbreviations"]
self._work_titles = kwargs["work_titles"]
self._work_abbreviations = kwargs["work_abbreviations"]
else:
logger.info("Initialising CitationMatcher...")
self._citation_parser = CitationParser()
logger.info("Fetching author names from the KB...")
author_names = knowledge_base.author_names
self._author_names = {key: StringUtils.normalize(author_names[key]) for key in author_names}
logger.info("Done. Fetching work titles from the KB...")
work_titles = knowledge_base.work_titles
self._work_titles = {key: StringUtils.normalize(work_titles[key]) for key in work_titles}
logger.info("Done. Fetching author abbreviations from the KB...")
author_abbreviations = knowledge_base.author_abbreviations
self._author_abbreviations = {key: StringUtils.normalize(author_abbreviations[key]) for key in
author_abbreviations}
logger.info("Done. Fetching work abbreviations from the KB...")
work_abbreviations = knowledge_base.work_abbreviations
self._work_abbreviations = {key: StringUtils.normalize(work_abbreviations[key]) for key in
work_abbreviations}
logger.info("Done. Now let's index all this information.")
self._author_idx, self._author_abbr_idx, self._work_idx, self._work_abbr_idx = self._initialise_indexes()
logger.info(self.settings)
def _initialise_indexes(self):
"""
Creates suffix arrays for efficient retrieval.
TODO: convert to lowercase before indexing (?)
"""
try:
logger.info("Start indexing author names...")
author_idx = DictValuesIndexer(self._author_names)
logger.info("Done. Start indexing author abbreviations...")
author_abbr_idx = DictValuesIndexer(self._author_abbreviations)
logger.info("Done. Start indexing work titles...")
work_idx = DictValuesIndexer(self._work_titles)
logger.info("Done. Start indexing work abbreviations...")
work_abbr_idx = DictValuesIndexer(self._work_abbreviations)
logger.info("Done with indexing.")
return author_idx, author_abbr_idx, work_idx, work_abbr_idx
except Exception, e:
raise e
@property
def settings(self):
"""Print to stdout the settings of the CitationMatcher."""
prolog = "%s initialisation settings:" % self.__class__
if self.fuzzy_match_entities:
entity_matching_settings = "\t-Entity matching: fuzzy matching=%s; min distance threshold=%i; max distance threshold=%i" % \
(self.fuzzy_match_entities, self.min_distance_entities,
self.max_distance_entities)
else:
entity_matching_settings = "\t-Entity matching: fuzzy matching=%s" % self.fuzzy_match_entities
if self.fuzzy_match_relations:
relation_matching_settings = "\t-Relation matching: fuzzy matching=%s; edit distance threshold=%i" % \
(self.fuzzy_match_relations, self.distance_relations)
else:
relation_matching_settings = "\t-Relation matching: fuzzy matching=%s" % self.fuzzy_match_relations
knowledge_base_extent = "\t-Extent of the KnowledgeBase: %i author abbreviations, %i author_names, %i work abbreviations, %i work titles." % \
(len(self._author_abbreviations)
, len(self._author_names)
, len(self._work_abbreviations)
, len(self._work_titles))
return "\n".join((prolog, entity_matching_settings, relation_matching_settings, knowledge_base_extent))
# TODO: remove and add to the `citation_parser`
def _format_scope(self, scope_dictionary):
"""
Args:
scope_dictionary:
{u'start': [u'1', u'100']}
returns:
string
"""
if (scope_dictionary.has_key("end")):
# is range
return "%s-%s" % (".".join(scope_dictionary["start"]), ".".join(scope_dictionary["end"]))
else:
# is not range
return ".".join(scope_dictionary["start"])
def _consolidate_result(
self,
urn_string,
citation_string,
entity_type,
scope
):
urn = CTS_URN(urn_string)
# check: does the URN have a scope but is missing the work element
if (urn.work is None):
# if so, try to get the opus maximum from the KB
opmax = self._kb.get_opus_maximum_of(urn)
if (opmax is not None):
logger.debug("%s is opus maximum of %s" % (opmax, urn))
urn = CTS_URN("{}".format(opmax.get_urn()))
return Result(citation_string, entity_type, scope, urn)
def _disambiguate_relation(self, citation_string, entity_type, scope, n_guess=1): # TODO: finish debugging
"""
        :citation_string: e.g. "Hom. Il."
:scope: e.g. "1,100"
:return: a named tuple (see `Result`)
"""
# citation string has one single token
if len(citation_string.split(" ")) == 1:
match = self.matches_work(citation_string, self.fuzzy_match_relations, self.distance_relations)
# TODO this is problematic
# should be: match is None or match does not contain at least one entry with distance=0
zero_distance_match = False
if match is not None:
for m in match:
if m[2] == 0:
zero_distance_match = True
logger.debug(
"[%s %s] zero distance match is %s, match = %s" % (citation_string, scope, zero_distance_match, match))
if match is None or not zero_distance_match:
match = self.matches_author(citation_string, self.fuzzy_match_relations, self.distance_relations)
if match is not None:
# match = [(id,name,diff) for id, name, diff in match if diff == 0][:n_guess] # this has to be removed
pass
else:
# fuzzy matching as author
# then fuzzy matching as work
# ad the end take the matching with lowest score
pass
# citation string has two tokens
elif (len(citation_string.split(" ")) == 2):
tok1, tok2 = citation_string.split(" ")
# case 2: tok1 and tok2 are author
match = self.matches_author(citation_string, self.fuzzy_match_relations, self.distance_relations)
if match is not None:
if (len(match) <= n_guess):
match = match[:n_guess]
else:
match = select_lcs_match(citation_string, match, n_guess)
for urn_string, label, score in match:
result = self._consolidate_result(
urn_string,
citation_string,
entity_type,
scope
)
return result
else:
# case 3: tok1 and tok2 are work
match = self.matches_work(
citation_string,
self.fuzzy_match_relations,
self.distance_relations
)
if match is not None:
if (len(match) <= n_guess):
match = match[:n_guess]
else:
match = select_lcs_match(citation_string, match, n_guess)
for urn_string, label, score in match:
result = self._consolidate_result(
urn_string,
citation_string,
entity_type,
scope
)
return result
# case 1: tok1 is author and tok2 is work
match_tok1 = self.matches_author(tok1, self.fuzzy_match_relations, self.distance_relations)
match_tok2 = self.matches_work(tok2, self.fuzzy_match_relations, self.distance_relations)
if (match_tok1 is not None and match_tok2 is not None):
for id1, label1, score1 in match_tok1:
for id2, label2, score2 in match_tok2:
work = self._kb.get_resource_by_urn(id2)
if id1 == str(work.author.get_urn()):
match = [(id2, label2, score2)]
return Result(citation_string, entity_type, scope, CTS_URN(id2))
else:
# case 2: tok1 and tok2 are author
match = self.matches_author(citation_string, self.fuzzy_match_relations, self.distance_relations)
if match is None:
# case 3: tok1 and tok2 are work
match = self.matches_work(citation_string, self.fuzzy_match_relations, self.distance_relations)
# citation string has more than two tokens
elif (len(citation_string.split(" ")) > 2):
match = self.matches_author(citation_string, self.fuzzy_match_relations, self.distance_relations)
else:
logger.error("This case is not handled properly: {}".format(
citation_string
))
raise ValueError("unhandled citation string: {}".format(citation_string))
# return only n_guess results
if match is None or len(match) == 0:
logger.debug("\'%s %s\': no disambiguation candidates were found." % (citation_string, scope))
return Result(citation_string, entity_type, scope, NIL_URN)
elif len(match) <= n_guess:
logger.debug("There are %i matches and `n_guess`==%i. Nothing to cut." % (len(match), n_guess))
elif len(match) > n_guess:
# iterate and get what's the lowest ed_score
# then keep only the matches with lowest (best) score
# then keep the one with longest common string
lowest_score = 1000
for m in match:
score = m[2]
if score < lowest_score:
lowest_score = score
filtered_matches = [m for m in match if m[2] == lowest_score]
best_match = ("", None)
if (lowest_score > 0):
for match in filtered_matches:
lcs = longest_common_substring(match[1], citation_string)
if (len(lcs) > len(best_match[0])):
best_match = (lcs, match)
match = [best_match[1]] # TODO: check this; don't think it's correct
logger.debug("Longest_common_substring selected %s out of %s" % (match, filtered_matches))
else:
# TODO: use context here to disambiguate
match = match[:n_guess]
for urn_string, label, score in match:
urn = CTS_URN(urn_string)
# check: does the URN have a scope but is missing the work element (not possible)?
if (urn.work is None):
# if so, try to get the opus maximum from the KB
opmax = self._kb.get_opus_maximum_of(urn)
if (opmax is not None):
logger.debug("%s is opus maximum of %s" % (opmax, urn))
urn = CTS_URN("{}".format(opmax.get_urn()))
return Result(citation_string, entity_type, scope, urn)
def _disambiguate_entity(self, mention, entity_type):
"""
When no match is found it's better not to fill with a bogus URN. The
reason is that in some cases it's perfectly ok that no match is found. An entity
can be a valid entity even without having disambiguation information in the ground truth.
:param mention:
:param entity_type:
:return: a named tuple (see `Result`)
"""
result = []
matches = []
distance_threshold = self.min_distance_entities
max_distance_threshold = self.max_distance_entities
"""
string = mention.encode("utf-8") # TODO: add a type check
regex_clean_string = r'(« )|( »)|\(|\)|\,'
cleaned_string = re.sub(regex_clean_string,"",string)
string = cleaned_string
"""
string = mention
if entity_type == "AAUTHOR":
if self.fuzzy_match_entities:
matches = self.matches_author(string, True, distance_threshold)
while (matches is None and distance_threshold <= max_distance_threshold):
distance_threshold += 1
matches = self.matches_author(string, True, distance_threshold)
else:
matches = self.matches_author(string, False)
elif (entity_type == "AWORK"):
if self.fuzzy_match_entities:
matches = self.matches_work(string, True, distance_threshold)
while (matches is None and distance_threshold <= max_distance_threshold):
distance_threshold += 1
matches = self.matches_work(string, True, distance_threshold)
else:
matches = self.matches_work(string, False)
else:
# TODO: raise exception
logger.warning("unknown entity type: %s" % entity_type)
if (matches is not None and len(matches) > 0):
lowest_score = 1000
for match in matches:
score = match[2]
if (score < lowest_score):
lowest_score = score
filtered_matches = [match for match in matches if match[2] == lowest_score]
filtered_matches = sorted(filtered_matches, key=itemgetter(2))
best_match = ("", None)
if (lowest_score > 0):
for match in filtered_matches:
lcs = longest_common_substring(match[1], string)
if (len(lcs) > len(best_match[0])):
best_match = (lcs, match)
if (best_match[1] is not None):
return Result(mention, entity_type, None, best_match[1][0])
else:
# TODO: perhaps log some message
return Result(mention, entity_type, None, filtered_matches[0][0])
else:
return Result(mention, entity_type, None, filtered_matches[0][0])
else:
return Result(mention, entity_type, None, NIL_URN)
def matches_author(self, string, fuzzy=False, distance_threshold=3):
"""
This function retrieves from the KnowledgeBase possible authors that match the search string.
None is returned if no matches are found.
:param string: the string to be matched
:param fuzzy: whether exact or fuzzy string matching should be applied
:param distance_threshold: the maximum edit distance threshold (ignored if `fuzzy==False`)
:return: a list of tuples, ordered by distance between the search and the matching string, where:
tuple[0] contains the id (i.e. CTS URN) of the matching author
tuple[1] contains a label of the matching author
tuple[2] is the distance, measured in characters, between the search string and the matching string
or None if no match is found.
"""
# string = string.lower()
author_matches, abbr_matches = [], []
if (not fuzzy):
author_matches = [(id.split("$$")[0]
, self._author_names[id]
, len(self._author_names[id]) - len(string))
for id in self._author_idx.searchAllWords(string)]
abbr_matches = [(id.split("$$")[0]
, self._author_abbreviations[id]
, len(self._author_abbreviations[id]) - len(string))
for id in self._author_abbr_idx.searchAllWords(string)]
else:
abbr_matches = [(id.split("$$")[0]
, self._author_abbreviations[id]
, edit_distance(string, self._author_abbreviations[id]))
for id in self._author_abbreviations
if edit_distance(string, self._author_abbreviations[id]) <= distance_threshold]
abbr_matches = sorted(abbr_matches, key=itemgetter(2))
author_matches = []
for id in self._author_names:
if (string.endswith(".")):
if string.replace(".", "") in self._author_names[id]:
if (len(string) > (len(self._author_names[id]) / 2)):
try:
assert abbr_matches[0][2] == 0
distance = len(self._author_names[id]) - len(string)
if distance < 0:
distance = 1
author_matches.append((id.split("$$")[0], self._author_names[id], distance))
except Exception, e:
author_matches.append((id.split("$$")[0], self._author_names[id], 0))
else:
if (edit_distance(string, self._author_names[id]) <= distance_threshold):
author_matches.append((id.split("$$")[0], self._author_names[id],
edit_distance(string, self._author_names[id])))
else:
if (edit_distance(string, self._author_names[id]) <= distance_threshold):
author_matches.append(
(id.split("$$")[0], self._author_names[id], edit_distance(string, self._author_names[id])))
if (len(author_matches) > 0 or len(abbr_matches) > 0):
return sorted(author_matches + abbr_matches, key=itemgetter(2))
else:
return None
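# Illustrative example of the return shape (the URN, labels and distances
# below are made up; actual values depend on the loaded KnowledgeBase):
#   matcher.matches_author("Hom.", fuzzy=False)
#   => [("urn:cts:greekLit:tlg0012", "Hom.", 0),
#       ("urn:cts:greekLit:tlg0012", "Homerus", 3)]
# i.e. (CTS URN, matching label, distance in characters) tuples sorted by
# ascending distance, or None when nothing matches.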
def matches_work(self, string, fuzzy=False, distance_threshold=3):
"""
This function retrieves from the KnowledgeBase possible works that match the search string.
None is returned if no matches are found.
:param string: the string to be matched
:param fuzzy: whether exact or fuzzy string matching should be applied
:param distance_threshold: the maximum edit distance threshold (ignored if `fuzzy==False`)
:return: a list of tuples, ordered by distance between the search and the matching string, where:
tuple[0] contains the id (i.e. CTS URN) of the matching work
tuple[1] contains a label of the matching work
tuple[2] is the distance, measured in characters, between the search string and the matching string
or None if no match is found.
"""
# string = string.lower()
work_matches, work_abbr_matches = [], []
if (not fuzzy):
work_matches = [(id.split("$$")[0]
, self._work_titles[id]
, len(self._work_titles[id]) - len(string))
for id
in self._work_idx.searchAllWords(string)]
work_abbr_matches = [(id.split("$$")[0]
, self._work_abbreviations[id]
, len(self._work_abbreviations[id]) - len(string))
for id
in self._work_abbr_idx.searchAllWords(string)]
logger.debug("Matching works: %s (fuzzy matching=%s)" % (work_matches, fuzzy))
logger.debug("Matching work abbreviations: %s (fuzzy matching=%s)" % (work_abbr_matches, fuzzy))
else:
string = string.lower()
work_matches = []
for id in self._work_titles:
distance = edit_distance(string, self._work_titles[id])
if distance <= distance_threshold:
work_matches.append(
(id.split("$$")[0]
, self._work_titles[id]
, distance)
)
work_abbr_matches = [(id.split("$$")[0]
, self._work_abbreviations[id]
, edit_distance(string, self._work_abbreviations[id].lower()))
for id in self._work_abbreviations
if edit_distance(string, self._work_abbreviations[id].lower()) <= distance_threshold]
logger.debug("Matching works: %s (fuzzy matching=%s; edit_distance_threshold=%i)" % (work_matches
, fuzzy
, distance_threshold))
logger.debug(
"Matching work abbreviations: %s (fuzzy matching=%s; edit_distance_threshold=%i)" % (work_abbr_matches
, fuzzy
,
distance_threshold))
if (len(work_matches) > 0 or len(work_abbr_matches) > 0):
return sorted(work_matches + work_abbr_matches, key=itemgetter(2))
else:
return None
def disambiguate(self, surface, entity_type, scope=None, n_results=1, **kwargs):
"""
:param surface:
:param entity_type:
:param scope:
:param n_results:
"""
assert surface is not None
cleaned_surface = StringUtils.normalize(surface)
logger.debug(
u"Citation string before/after cleaning: \"{}\" => \"{}\"".format(
surface,
cleaned_surface
)
)
# TODO: log the result
if scope is None:
return self._disambiguate_entity(cleaned_surface, entity_type)
elif scope is not None:
return self._disambiguate_relation(
cleaned_surface,
entity_type,
scope,
n_results
)
def to_pickle(self, path):
with open(path, 'wb') as picklefile:
pickle.dump(self, picklefile)
return
@staticmethod
def from_pickle(path):
with open(path, 'rb') as picklefile:
return pickle.load(picklefile)
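# Usage sketch (illustrative, not part of the original source): the two
# methods above give a simple persistence round trip; `matcher` and the
# pickle path are placeholders.
#   matcher.to_pickle('/tmp/matcher.pkl')
#   restored = type(matcher).from_pickle('/tmp/matcher.pkl')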
class MLCitationMatcher(object): # TODO: renaming RankingCitationMatcher (?)
"""Machine Learning-based Citation Matcher.
This matcher uses a supervised learning-to-rank framework to build a model
from a set of labeled entity mentions.
"""
def __init__(self, train_data, kb=None, parallelize=False, **kwargs):
"""Initialize an instance of MLCitationMatcher.
:param kb: an instance of HuCit KnowledgeBase
:type kb: knowledge_base.KnowledgeBase
:param train_data: a set of labeled mentions to be used as train data
:type train_data: pandas.DataFrame
Optional kwargs:
(TODO: decide whether to keep them in the final version)
- `feature_extractor`
- `candidate_generator`
- `include_nil`
- `nb_processes`
"""
LOGGER.info('Initializing ML-Citation Matcher')
# a dictionary to store the settings so that we can print them
# if needed
self._settings = {}
self._settings["parallelize"] = parallelize
if "include_nil" in kwargs and kwargs["include_nil"]:
self._settings["include_nil"] = True
else:
self._settings["include_nil"] = False
if "feature_extractor" in kwargs:
self._feature_extractor = kwargs["feature_extractor"]
else:
self._feature_extractor = FeatureExtractor(kb, train_data)
if "nb_processes" in kwargs:
self._settings["nb_processes"] = kwargs["nb_processes"]
else:
self._settings["nb_processes"] = 10
if "C" in kwargs:
self._settings["C_param"] = kwargs["C"]
else:
self._settings["C_param"] = 10
if "find_C" in kwargs:
self._settings["find_C"] = True
else:
self._settings["find_C"] = False
# normalize authors and works once, then pass them to both
# CandidatesGenerator and FeatureExtractor
self._kb_norm_authors = self._feature_extractor._kb_norm_authors
self._kb_norm_works = self._feature_extractor._kb_norm_works
if "candidate_generator" in kwargs:
self._cg = kwargs["candidate_generator"]
else:
self._cg = CandidatesGenerator(
kb,
kb_norm_authors=self._kb_norm_authors,
kb_norm_works=self._kb_norm_works
)
self._ranker = LinearSVMRank()
self._is_trained = False
LOGGER.info("ML-Citation Matcher initialized (took {} secs)".format(
time.clock()
))
self._train(train_data)
def _train(self, train_data, nb_processes=10):
"""Train the MLCitationMatcher with a set of labeled mentions.
:param train_data: a set of labeled mentions to be used as train data
:type train_data: pandas.DataFrame
:param include_nil: include the NIL entity as a candidate if the true
entity is not NIL (default is True)
:type include_nil: bool
"""
LOGGER.info('Starting training')
include_nil = self._settings["include_nil"]
if self._settings["parallelize"]:
LOGGER.info(
'Parallelization is enabled (nb_processes={})'.format(
nb_processes
)
)
pool = multiprocessing.Pool(processes=nb_processes)
X, y, groups = [], [], []
group_id = 0
# TODO: remove from production
pickle_path = "citation_extractor/data/pickles/all_candidates.pkl"
if os.path.exists(pickle_path):
with open(pickle_path, "rb") as pickle_file:
all_candidates = pickle.load(pickle_file)
LOGGER.info(
"Loaded candidates from file {}".format(pickle_path)
)
else:
all_candidates = self._cg.generate_candidates_parallel(train_data)
with open(pickle_path, "wb") as pickle_file:
pickle.dump(all_candidates, pickle_file)
for mention_id, row in train_data.iterrows():
LOGGER.info('Training with mention {}'.format(mention_id))
surface = row['surface_norm_dots']
scope = row['scope']
type = row['type']
doc_title = row['doc_title_norm']
mentions_in_title = row['doc_title_mentions']
doc_text = row['doc_text']
other_mentions = row['other_mentions']
true_urn = row['urn_clean']
# Get pre-generated candidates
candidates = all_candidates[mention_id]
# Remove true entity (need special treatment)
if true_urn in candidates:
candidates.remove(true_urn)
# Extract features
logger.info(
"Extracting features from {} candidates (parallel={})".format(
len(candidates),
self._settings["parallelize"]
)
)
feature_vectors = map(
lambda candidate: self._feature_extractor.extract(
m_surface=surface,
m_scope=scope,
m_type=type,
m_title_mentions=mentions_in_title,
m_title=doc_title,
m_doc_text=doc_text,
m_other_mentions=other_mentions,
candidate_urn=candidate
),
candidates
)
# Append not-true candidates values
for vector in feature_vectors:
X.append(vector)
y.append(0) # false
groups.append(group_id)
# Add the true entity (not NIL)
if true_urn != NIL_URN:
LOGGER.debug('True entity is not NIL')
true_feature_vector = self._feature_extractor.extract(
m_surface=surface,
m_scope=scope,
m_type=type,
m_title_mentions=mentions_in_title,
m_title=doc_title,
m_doc_text=doc_text,
m_other_mentions=other_mentions,
candidate_urn=true_urn
)
# Append true candidate values
feature_vectors.append(true_feature_vector)
X.append(true_feature_vector)
y.append(1) # true
groups.append(group_id)
# Include NIL if specified
if include_nil:
LOGGER.debug('Including NIL entity as candidate')
nil_feature_vector = self._feature_extractor.extract_nil(
m_type=type,
m_scope=scope,
feature_dicts=feature_vectors
)
X.append(nil_feature_vector)
y.append(0)
groups.append(group_id)
# Add the true entity (NIL)
else:
LOGGER.debug('True entity is NIL')
nil_feature_vector = self._feature_extractor.extract_nil(
m_type=type,
m_scope=scope,
feature_dicts=feature_vectors
)
X.append(nil_feature_vector)
y.append(1)
groups.append(group_id)
group_id += 1
if self._settings["parallelize"]:
pool.terminate()
# Fit SVMRank
self._ranker.fit(
X,
y,
groups,
C=self._settings["C_param"],
kfold_C_param=self._settings["find_C"]
)
self._is_trained = True
@property
def settings(self):
"""Return a string with the settings of the CitationMatcher."""
prolog = "%s initialisation settings:" % self.__class__
settings = [
"{}: {}".format(setting, self._settings[setting])
for setting in self._settings.keys()
]
settings.append("SVMRanker: {}".format(repr(self._ranker._classifier)))
settings.insert(0, prolog)
return "\n".join(settings)
def disambiguate(
self,
surface,
surface_norm,
surface_norm_dots,
scope,
mention_type,
doc_title,
mentions_in_title,
doc_text,
other_mentions,
nb_processes=10 # TODO: remove when using dask
):
"""Disambiguate an entity mention.
:param surface: the surface form of the mention
:type surface: unicode
:param scope: the scope of the mention (could be None)
:type scope: unicode
:param mention_type: type of the mention (AAUTHOR, AWORK, REFAUWORK)
:type mention_type: str
:param doc_title: the title of the document containing the mention
:type doc_title: unicode
:param mentions_in_title: the mentions extracted from the title
:type mentions_in_title: list of tuples [(m_type, m_surface), ...]
:param doc_text: the text of the document containing the mention
:type doc_text: unicode
:param other_mentions: the other mentions extracted from the same document
:type other_mentions: list of triples [(m_type, m_surface, m_scope), ...]
:param nb_processes: number of processes to be used for parallelization (default is 10)
:type nb_processes: int
:return: the URN of the candidate entity ranked first
:rtype: str
"""
# TODO: move some parameters to kwargs
LOGGER.info(
u'Disambiguating surface={} scope={} type={}'.format(
surface,
scope,
mention_type
)
)
# shortcuts
nb_processes = self._settings["nb_processes"]
include_nil = self._settings["include_nil"]
parallelize = self._settings["parallelize"]
if not self._is_trained:
raise Exception(
'method disambiguate() must be invoked after train()'
)
if parallelize:
LOGGER.debug(
'Parallelization is enabled (nb_processes={})'.format(
nb_processes
)
)
# Generate candidates
LOGGER.info('Generating candidates')
candidates = self._cg.generate_candidates(
surface_norm,
mention_type,
scope
)
LOGGER.info('Generated {} candidates'.format(len(candidates)))
# Extract features
LOGGER.info('Extracting features for each (mention, candidate) couple')
feature_vectors = map(
lambda candidate: self._feature_extractor.extract(
m_surface=surface_norm_dots,
m_scope=scope,
m_type=mention_type,
m_title_mentions=mentions_in_title,
m_title=doc_title,
m_doc_text=doc_text,
m_other_mentions=other_mentions,
candidate_urn=candidate
),
candidates
)
# Include NIL candidate if specified
if include_nil:
LOGGER.info('Including NIL entity as candidate')
candidates.append(NIL_URN)
nil_feature_vector = self._feature_extractor.extract_nil(
m_type=mention_type,
m_scope=scope,
feature_dicts=feature_vectors
)
feature_vectors.append(nil_feature_vector)
# Check whether there are no candidates (in case of not include_nil)
# or just one
if len(candidates) == 0:
LOGGER.info('Zero candidates. Returning NIL')
return Result(
mention=surface,
entity_type="type",
scope=scope,
urn=NIL_URN
)
elif len(candidates) == 1:
LOGGER.info('Exactly one candidate. Skipping ranking.')
return Result(
mention=surface,
entity_type="type",
scope=scope,
urn=candidates[0]
)
# Rank candidates
ranked_columns, scores = self._ranker.predict(feature_vectors)
winner_column = ranked_columns[0]
winner_score = scores[0]
winner_candidate = candidates[winner_column]
LOGGER.info(
'Entity {} won with score {} (total candidates={})'.format(
winner_candidate,
winner_score,
len(candidates)
)
)
return Result(
mention=surface,
entity_type="type",
scope=scope,
urn=winner_candidate
)
| gpl-3.0 |
lpsinger/astropy | astropy/nddata/ccddata.py | 5 | 27852 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from .compat import NDDataArray
from .nduncertainty import (
StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarly disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit ** 2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit ** 2))
raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
-----------
data : `~astropy.nddata.CCDData`-like or array-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
If the unit is ``None`` or not otherwise specified it will raise a
``ValueError``
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
``Classmethod`` to create an CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Attributes
----------
known_invalid_fits_unit_strings
A dictionary that maps commonly-used fits unit name strings that are
technically invalid to the correct valid unit type (or unit string).
This is primarily for variant names like "ELECTRONS/S" which are not
formally valid, but are unambiguous and frequently enough encountered
that it is convenient to map them to the correct unit.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
if self._wcs is not None:
llwcs = self._wcs.low_level_wcs
if not isinstance(llwcs, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = llwcs
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
if not isinstance(value, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
raise TypeError("uncertainty must be an instance of a "
"NDUncertainty object or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True, key_uncertainty_type='UTYPE'):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not an astropy uncertainty type.
- If ``self.uncertainty`` is set but has a unit different from
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError('only uncertainties of type {} can be saved.'
.format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None):
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
'saving uncertainties with a unit that is not '
'equivalent to the unit from the data unit is not '
'supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta[f'HIERARCH {key.upper()}'] = (
short_name, f"Shortened name for {key}")
self.meta[short_name] = value
else:
self.meta[key] = value
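# Illustrative example (keyword and value are placeholders): for a key such
# as 'OBSERVATORY_NOTES' (more than 8 characters) with a value longer than
# 72 characters, the method above stores
#   meta['HIERARCH OBSERVATORY_NOTES'] = ('OBSERVAT', 'Shortened name for OBSERVATORY_NOTES')
#   meta['OBSERVAT'] = <the long value>
# so both the full keyword and the long value survive FITS serialization.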
# A dictionary mapping "known" invalid fits unit
known_invalid_fits_unit_strings = {'ELECTRONS/S': u.electron/u.s,
'ELECTRONS': u.electron,
'electrons': u.electron}
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
_PCs = set(['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2'])
_CDs = set(['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'])
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info('An exception happened while extracting WCS information from '
'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = '{}_{}_{}'
polynomials = ['A', 'B', 'AP', 'BP']
for poly in polynomials:
order = wcs.sip.__getattribute__(f'{poly.lower()}_order')
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j),
ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None,
key_uncertainty_type='UTYPE', **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, optional
FITS extension from which CCDData should be initialized. If zero
and no data in the primary extension, it will search for the first
extension with data. The header will be added to the primary header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used
as the unit, if present), this argument is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
FITS files that contained scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f'unsupported keyword: {key}.'
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None')
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (hdus.info(hdu)[i][3] == 'ImageHDU' and
hdus.fileinfo(i)['datSpan'] > 0):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
'The Header value for the key BUNIT ({}) cannot be '
'interpreted as valid unit. To successfully read the '
'file as CCDData you can pass in a valid `unit` '
'argument explicitly or change the header of the FITS '
'file before reading it.'
.format(fits_unit_string))
else:
log.info("using the unit {} passed to the FITS reader instead "
"of the unit {} in the FITS file."
.format(unit, fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
def fits_ccddata_writer(
ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, key_uncertainty_type='UTYPE', **kwd):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has a unit different from
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
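# Minimal usage sketch (illustrative; 'image.fits' is a placeholder file):
# with the reader and writer registered above, a round trip looks like
#   from astropy.nddata import CCDData
#   ccd = CCDData.read('image.fits', unit='adu')
#   ccd.write('image_copy.fits', overwrite=True)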
| bsd-3-clause |
ODM2/ODM2YODAParser | yodatools/excelparser/excelParser.py | 2 | 22474 | import os
import re
from collections import defaultdict
from uuid import uuid4
import wx
from datetime import datetime
from pubsub import pub
from pandas import isnull, DataFrame, NaT
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.session import Session
import openpyxl
from openpyxl.worksheet.table import Table
from openpyxl.workbook.workbook import Workbook
from openpyxl.cell.cell import Cell
from yodatools.excelparser.ParserException import ParserException
from odm2api.models import \
(Base,
DataSets,
Citations,
AuthorLists,
People,
Units,
SamplingFeatures,
Organizations,
Affiliations,
ProcessingLevels,
Sites,
SpatialReferences,
Methods,
Variables,
Actions,
FeatureActions,
ActionBy,
TimeSeriesResults,
DataSetsResults,
TimeSeriesResultValues,
CVUnitsType,
CVVariableName,
setSchema)
class ExcelParser(object):
TABLE_NAMES = [
'Analysis_Results',
'DatasetCitation',
'Organizations',
'People',
'ProcessingLevels',
'Sites',
'SpatialReferences',
'SpecimenAnalysisMethods',
'SpecimenCollectionMethods',
'Specimens',
'Units',
'Variables'
]
def __init__(self, input_file, session_factory, **kwargs):
self.input_file = input_file
self.__session_factory = session_factory
self.session = session_factory.getSession() # type: Session
self.engine = session_factory.engine
self.total_rows_to_read = 0
self.rows_read = 0
self.workbook = None
self.sheets = []
self.name_ranges = {}
self.tables = {}
self.orgs = defaultdict(lambda: None)
self.affiliations = defaultdict(lambda: None)
self.data_set = defaultdict(lambda: None)
self.methods = defaultdict(lambda: None)
self.variables = defaultdict(lambda: None)
self.units = defaultdict(lambda: None)
self.processing_levels = defaultdict(lambda: None)
self.spatial_references = defaultdict(lambda: None)
self._init_data(input_file)
def _init_data(self, file_path):
self.update_progress_label('Loading %s' % file_path)
self.workbook = openpyxl.load_workbook(file_path, data_only=True) # type: Workbook
# Loop through worksheets to grab table data
for ws in self.workbook.worksheets:
tables = getattr(ws, '_tables', [])
for table in tables: # type: Table
if table.name in ['AuthorList', 'ExternalIDOrgs', 'ControlledVocabularies', 'ExternalIdentifiers']:
# skip these tables because they do not (currently) need to be parsed
# and they mess up the total row count calculation
continue
self.update_progress_label('Loading table data: %s' % table.name)
rows = ws[table.ref]
# check if the table has fewer than 2 rows, since the first row is just the table headers
# if True, then the current table has no data
if len(rows) < 2:
continue
# get headers from row 1
headers = map(lambda x: x.replace('[CV]', '').strip(), [cell.value for cell in rows[0]])
# get values from rows 2...n
data = [[cell.value for cell in row] for row in rows[1:]]
self.tables[table.name.strip()] = DataFrame(data, columns=headers).dropna(how='all')
self.update_progress_label('Calculating total row size')
self.total_rows_to_read = sum([table.shape[0] for table in self.tables.values()])
def get_or_create(self, model, values, check_fields=None, filter_by=None, commit=True): # type: (Base, dict, [str], str|[str], bool) -> Base
"""
Gets an existing instance of <model> or creates a new one if not found
:param model: The model from odm2api.models used to create the object
:param values: A dict containing the fields to insert into the database (given the record does not exist).
:param check_fields: A list of strings of required field names (optional).
:param filter_by: A string or list of strings used to filter queries by. If None, the query will filter using **values (optional).
:param commit: Boolean value indicating whether or not to commit the transaction.
:return: An instance of the retrieved or created model.
:raise ValueError: Raised when a value in values is NaT given the key exists in check_fields
"""
if check_fields:
bad_fields = []
for field in check_fields:
if isnull(values[field]):
bad_fields.append(field)
if len(bad_fields):
raise ValueError('Object "{}" is missing required fields: {}'.format(model.__tablename__.title(),
', '.join(bad_fields)))
filters = {}
if isinstance(filter_by, str):
filters[filter_by] = values.get(filter_by, None)
elif isinstance(filter_by, list):
for f in filter_by:
filters[f] = values.get(f, None)
else:
filters = values
instance = self.get(model, **filters)
if instance:
return instance
else:
return self.create(model, commit=commit, **values)
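# Illustrative call (values are placeholders), mirroring how the parse_*
# methods below use this helper:
#   unit = self.get_or_create(
#       Units,
#       {'UnitsTypeCV': 'Length', 'UnitsAbbreviation': 'm', 'UnitsName': 'meter'},
#       filter_by=['UnitsName', 'UnitsAbbreviation', 'UnitsTypeCV'],
#       check_fields=['UnitsTypeCV'])
# returns the existing Units row matching the filter, or inserts a new one.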
def create(self, model, commit=True, **kwargs):
"""
Creates an instance of <model>
:param model: an ODM2 model
:param commit: boolean, commits the newly created object if true
:param kwargs: keyword arguments used to create <model>
:return:
"""
instance = model(**kwargs)
self.session.add(instance)
if commit:
self.session.commit()
return instance
def get(self, model, **kwargs):
"""
Gets a single instance of an ODM2 model
:param model: class of the model to query
:param kwargs: values to use in query
:return: an instance of <model>
"""
try:
return self.session.query(model).filter_by(**kwargs).one()
except NoResultFound:
return None
def _flush(self):
try:
self.session.flush()
except IntegrityError as e:
if os.getenv('DEBUG') == 'true':
print(e)
self.session.rollback()
def update_progress_label(self, message, label_pos=1):
pub.sendMessage('controller.update_progress_label', message=message, label_pos=label_pos)
def update_output_text(self, message):
pub.sendMessage('controller.update_output_text', message='%s\n' % message)
def update_gauge(self, rows_read=1, message=None, gauge_pos=1, label_pos=1, setvalue=None):
"""
Updates the gauge based on `self.rows_read`
:return: None
"""
if message is not None:
self.update_progress_label(message, label_pos=label_pos)
if setvalue is not None:
value = setvalue
else:
self.rows_read += rows_read
try:
value = (float(self.rows_read) / float(self.total_rows_to_read)) * 100.0
except ZeroDivisionError:
return
pub.sendMessage('controller.update_gauge', value=value, gauge_pos=gauge_pos)
def get_named_range(self, sheet, coord):
"""
Returns the range of cells contained in a given worksheet by a given set of coordinates
:param sheet: string like.
Name of the worksheet
:param coord: string like
String representation of sheet coordinates
:return: Range of cell(s)
"""
ws = self.workbook[sheet]
return ws[coord]
def get_named_range_value(self, sheet, coord):
"""
Gets the value of the cell(s) in a given worksheet at a given set of coordinates
:param sheet: string like.
Name of the worksheet with the named range
:param coord: string like.
String representation of the named range coordinate (e.g. '$A$1')
:return: Value(s) contained in the named range given by `coord`
"""
value = self.get_named_range(sheet, coord)
if isinstance(value, tuple):
results = []
for v in value:
results.append(v[0].value)
# value = [v.value for v in value]
return results
elif hasattr(value, 'value'):
return value.value
return value
def get_named_range_cell_value(self, named_range):
"""
Gets the value of the cell given by named_range. The passed in named range
should reference only a single cell.
:param named_range: string like.
Name of the named range
:return:
"""
try:
nr = self.workbook.defined_names[named_range]
return self.get_named_range_value(*next(nr.destinations))
except KeyError:
return None
def parse_name(self, fullname): # type: (str) -> dict
"""
Parses a full name contained in a string and returns a dict representation
of the name. Also removes trailing/leading whitespace of the names.
If `fullname` does not contain a comma, it's assumed `fullname` is formatted as:
"<first name> <middles name(s)> <last name>"
If `fullname` contains a comma (e.g. "Doe, John"), then it is assumed `fullname`
if formatted as:
"<last name>, <first name> <middle name(s)>"
:param fullname:
:return:
"""
values = re.split(r'\s+', fullname)
if any([',' in name for name in values]):
# `fullname` contained a comma (formatted as "<last>, <first> <middle>")
# so do a little rearranging.
lastname = values.pop(0).replace(',', '')
try:
firstname = values.pop(0)
except IndexError:
firstname = ''
values = [firstname] + values + [lastname]
names = {
'first': values[0],
'last': values[-1],
'middle': ' '.join(values[1:-1])
}
return names
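# Both supported input formats map to the same dict (illustrative name):
#   parse_name("John Ronald Reuel Tolkien")
#   parse_name("Tolkien, John Ronald Reuel")
#   => {'first': 'John', 'middle': 'Ronald Reuel', 'last': 'Tolkien'}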
def create_action(self, start_date, end_date, utcoffset, method, commit=False): # type: (datetime, datetime, int, Methods, bool) -> Actions
"""
Creates an ODM2 Actions object
:param start_date: datetime like
:param end_date: datetime like
:param utcoffset: int like
:param method: Methods object
:param commit: bool
:return:
"""
utcoffset = int(utcoffset)
return self.create(Actions, commit=commit, **{
'MethodObj': method,
'ActionTypeCV': "Observation",
'BeginDateTime': start_date,
'BeginDateTimeUTCOffset': utcoffset,
'EndDateTime': end_date,
'EndDateTimeUTCOffset': utcoffset
})
def create_feature_action(self, sampling_feature, action, commit=False): # type: (SamplingFeatures, Actions, bool) -> FeatureActions
return self.create(FeatureActions, commit=commit, **{
'SamplingFeatureObj': sampling_feature,
'ActionObj': action
})
def create_action_by(self, affiliation, action, commit=False): # type: (Affiliations, Actions, bool) -> ActionBy
return self.create(ActionBy, commit=commit, **{
'AffiliationObj': affiliation,
'ActionObj': action,
'IsActionLead': True
})
def parse_people_and_orgs(self):
self.update_progress_label('Reading Organizations')
organization_table = self.tables.get('Organizations', DataFrame())
for _, row in organization_table.iterrows():
params = {
'OrganizationTypeCV': row.get('Organization Type'),
'OrganizationCode': row.get('Organization Code'),
'OrganizationName': row.get('Organization Name')
}
# check if params has required fields
assert all(params.values()), 'Values = %s ' % str(params.values())
# add non required fields
params.update(OrganizationLink=row.get('Organization Link', None),
OrganizationDescription=row.get('Organization Description', None))
org = self.get_or_create(Organizations, params, filter_by='OrganizationName', commit=False)
self.orgs[row.get('Organization Name')] = org # save this for later when we create Affiliations
self.update_gauge()
self.session.commit()
# Create Person and Affiliation objects
self.update_progress_label('Reading People')
people_table = self.tables.get('People', DataFrame())
for _, row in people_table.iterrows(): # type: (any, DataFrame)
row.fillna(value='', inplace=True) # replace NaN values with empty string
person_params = {
'PersonFirstName': row.get('First Name'),
'PersonLastName': row.get('Last Name'),
'PersonMiddleName': row.get('Middle Name')
}
if NaT in person_params.values():
continue
person = self.get_or_create(People, person_params)
aff_params = {
'AffiliationStartDate': row.get('Affiliation Start Date'),
'AffiliationEndDate': row.get('Affiliation End Date'),
'PrimaryPhone': row.get('Primary Phone'),
'PrimaryEmail': row.get('Primary Email'),
'PrimaryAddress': row.get('Primary Address'),
'PersonLink': row.get('Person Link'),
'OrganizationObj': self.orgs.get(row.get('Organization Name')),
'PersonObj': person
}
start_date = aff_params['AffiliationStartDate']
aff_params['AffiliationStartDate'] = datetime(year=start_date.year, month=start_date.month,
day=start_date.day, hour=start_date.hour,
minute=start_date.minute, second=start_date.second)
del aff_params['AffiliationEndDate']
aff = self.get_or_create(Affiliations, aff_params, filter_by='PersonID')
self.affiliations[row.get('Full Name')] = aff
self.update_gauge()
def parse_datasets(self):
self.update_progress_label('parsing datasets')
dataset_uuid = self.get_named_range_cell_value('DatasetUUID')
dataset_type = self.get_named_range_cell_value('DatasetType')
dataset_code = self.get_named_range_cell_value('DatasetCode')
dataset_title = self.get_named_range_cell_value('DatasetTitle')
dataset_abstract = self.get_named_range_cell_value('DatasetAbstract')
params = {
'DataSetUUID': dataset_uuid,
'DataSetTypeCV': dataset_type,
'DataSetCode': dataset_code,
'DataSetTitle': dataset_title,
'DataSetAbstract': dataset_abstract
}
self.data_set = self.get_or_create(DataSets, params, filter_by=['DataSetCode'])
def parse_methods(self, table=None):
"""
Parse Methods recorded in the excel template
:param table: A dataframe containing the Method table data
:return: None
"""
if table is None:
table = self.tables.get('Methods', DataFrame()) # type: DataFrame
# Force values in 'Method Code' column to be strings
table['Method Code'] = table['Method Code'].astype(str)
self.update_progress_label('Reading Methods table')
for _, row in table.iterrows():
self.methods[row.get('Method Code', '').lower()] = self.parse_method(**row)
self.session.commit()
self.update_gauge(table.shape[0])
def parse_method(self, **kwargs):
org = self.orgs.get(kwargs.get('Organization Name'))
params = {
'MethodTypeCV': kwargs.get('Method Type'),
'MethodCode': kwargs.get('Method Code'),
'MethodName': kwargs.get('Method Name')
}
# check if params has required fields
assert all(params.values()), 'Values = %s ' % str(params.values())
# After checking for required fields, add the non required field
params.update(MethodLink=kwargs.get('MethodLink'),
MethodDescription=kwargs.get('Method Description'),
OrganizationObj=org)
return self.get_or_create(Methods, params, filter_by='MethodCode', commit=False)
def parse_variables(self):
table = self.tables.get('Variables', DataFrame())
table.replace({'NULL': None}, inplace=True)
self.update_progress_label('Reading Variables table')
for _, row in table.iterrows():
params = {
'VariableTypeCV': row.get('Variable Type'),
'VariableCode': row.get('Variable Code'),
'VariableNameCV': row.get('Variable Name'),
'NoDataValue': row.get('No Data Value')
}
assert(all(params.values()))
params.update(VariableDefinition=row.get('Variable Definition'),
SpeciationCV=row.get('Speciation'))
variable = self.get_or_create(Variables, params, filter_by=['VariableCode'], check_fields=['NoDataValue'], commit=False)
self.variables[params.get('VariableCode').lower()] = variable
self.session.commit()
self.update_gauge(table.shape[0])
def parse_units(self):
self.update_progress_label('Reading Units')
table = self.tables.get('Units', DataFrame())
for _, row in table.iterrows():
params = {
'UnitsTypeCV': row.get('Units Type'),
'UnitsAbbreviation': row.get('Units Abbreviation'),
'UnitsName': row.get('Units Name')
}
assert(all(params.values()))
params.update(UnitsLink=row.get('Units Link'))
unit = self.get_or_create(Units, params, filter_by=['UnitsName', 'UnitsAbbreviation', 'UnitsTypeCV'],
check_fields=['UnitsTypeCV'])
self.units[params.get('UnitsName').lower()] = unit
self.update_gauge(table.shape[0])
def parse_spatial_reference(self):
"""
Parse spatial references
:return: None
"""
self.update_progress_label('Reading SpatialReferences table')
table = self.tables.get('SpatialReferences', DataFrame())
for _, row in table.iterrows():
params = {
'SRSCode': row.get('SRSCode'),
'SRSName': row.get('SRSName'),
'SRSDescription': row.get('SRSDescription'),
'SRSLink': row.get('SRSLink'),
}
assert(params.get('SRSName'))
sref = self.get_or_create(SpatialReferences, params, filter_by=['SRSCode'], commit=False)
self.spatial_references[row.get('SRSName', '').lower()] = sref
self.session.commit()
def parse_processing_level(self):
self.update_progress_label('Reading ProcessingLevels table')
# processing_codes = self.get_named_range_cell_value('ProcessingLevelCodes')
# processing_codes = [code for code in processing_codes if code is not None]
table = self.tables.get('ProcessingLevels', DataFrame())
if 'Processing Level Code' not in table.keys():
raise ParserException('Processing Level Codes not found. (Processing Level Information probably not formatted as a table in excel)')
table['ProcessingLevelCodes'] = table['Processing Level Code'].astype(int).astype(str)
for _, row in table.iterrows():
params = {
'ProcessingLevelCode': str(int(row.get('Processing Level Code'))),
'Definition': row.get('Definition'),
'Explanation': row.get('Explanation')
}
# assert(params.get('ProcessingLevelCode', False))
plvl = self.get_or_create(ProcessingLevels, params, filter_by=['ProcessingLevelCode'])
self.processing_levels[params.get('ProcessingLevelCode')] = plvl
self.update_gauge()
self.session.commit()
def get_table_name_ranges(self):
"""
        Return a dictionary mapping each sheet name to the named ranges on
        that sheet whose names contain "_Table". Each named range holds the
        cell locations of a table's data.
        :rtype: dict
"""
CONST_NAME = "_Table"
table_name_range = {}
for name_range in self.name_ranges:
if CONST_NAME in name_range.name:
sheet = name_range.attr_text.split('!')[0]
sheet = sheet.replace('\'', '')
if sheet in table_name_range:
table_name_range[sheet].append(name_range)
else:
table_name_range[sheet] = [name_range]
return table_name_range
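    # Illustrative sketch (assumed names, not read from a real workbook): a
    # named range called "Methods_Table" whose attr_text is
    # "'Methods'!$A$5:$F$20" would be grouped under the "Methods" sheet, so
    # this method would return something like
    #
    #   {'Methods': [<named range 'Methods_Table'>],
    #    'Variables': [<named range 'Variables_Table'>]}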
def get_range_address(self, named_range):
"""
        Deprecated
:param named_range:
:return:
"""
if named_range is not None:
return named_range.attr_text.split('!')[1].replace('$', '')
return None
def get_range_value(self, range_name, sheet):
"""
        Deprecated
:param range_name:
:param sheet:
:return:
"""
value = None
named_range = self.workbook.get_named_range(range_name)
range_ = self.get_range_address(named_range)
if range_:
value = sheet[range_].value
return value
def get_sheet_and_table(self, sheet_name):
"""
        Deprecated
:param sheet_name:
:return:
"""
if sheet_name not in self.tables:
return [], []
sheet = self.workbook.get_sheet_by_name(sheet_name)
tables = self.tables[sheet_name]
return sheet, tables | bsd-3-clause |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/plot/grids.py | 1 | 6267 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.plot.grids Plot a dust grid wireframe to PDF
#
# The function in this module creates a PDF plot from a sequence of grid coordinates
# provided as an input text file, directly creating a PDF file (i.e. without using matplotlib).
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import os
import numpy as np
from reportlab.pdfgen import canvas
# Import the relevant PTS classes and modules
from ..tools import archive as arch
# -----------------------------------------------------------------
# This function creates a plot for each "gridxx.dat" file in the output of the specified simulation.
# The plots are saved in PDF format and are placed next to the original file(s) with
# the same name but a different extension.
#
# There are two format variations for 2D and 3D information, respectively. The 2D format describes the
# intersection of a dust grid structure with one of the coordinate planes. The 3D format fully describes
# all or part of the dust cells in the grid. Each line in the file contains two (2D) or three (3D)
# coordinates separated by whitespace, or is empty. Consecutive nonempty lines represent a sequence of
# "lineto" commands; an empty line marks a "moveto" command.
#
# The function takes the following arguments:
# - simulation: the SkirtSimulation object representing the simulation to be handled
# - figsize: the horizontal and vertical size of the output figure in inch (!); default is 8 x 8 inch
#
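# As an illustration of the format described above (this sample is made up,
# not taken from an actual SKIRT run), a 2D "gridxx.dat" file drawing two
# separate squares would look like:
#
#   0 0
#   1 0
#   1 1
#   0 1
#   0 0
#           <- blank line: start a new "moveto"
#   2 2
#   3 2
#   3 3
#   2 3
#   2 2
#
# i.e. consecutive coordinate lines are joined by "lineto" segments and every
# blank line lifts the pen before the next point.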
def plotgrids(simulation, figsize=(8,8), output_path=None, silent=False, prefix=None):
for gridfile in simulation.gridxxdatpaths():
plotfile = gridfile[:-4] + ".pdf"
if output_path is not None:
if prefix is None: prefix = ""
else: prefix = prefix + "_"
plotfile = os.path.join(output_path, prefix + os.path.basename(plotfile))
# setup the figure with the appropriate size (in points)
figwidth = 72*figsize[0]
figheight = 72*figsize[1]
if figwidth==figheight: figheight+=2 # to ensure portrait orientation when printed
fig = canvas.Canvas(plotfile, pagesize=(figwidth,figheight))
fig.setAuthor("Python Toolkit for SKIRT")
fig.setLineWidth(0.1)
# determine the format type from the first nonempty line (3D format has 3 columns, 2D format has 2 columns)
for line in arch.opentext(gridfile):
form = len(line.split())
if form > 0: break
if form == 2:
# --- 2D format ---
# determine the extent of the grid being plotted
xmin, ymin, xmax, ymax = float('Inf'), float('Inf'), float('-Inf'), float('-Inf')
for line in arch.opentext(gridfile):
coords = line.split()
if len(coords)==2:
x, y = float(coords[0]), float(coords[1])
xmin, ymin, xmax, ymax = min(xmin,x), min(ymin,y), max(xmax,x), max(ymax,y)
# determine the scales and offsets to fit and center the grid in the figure
xs = figwidth*0.95/(xmax-xmin)
ys = figheight*0.95/(ymax-ymin)
xo = (figwidth-xs*(xmax-xmin))/2. - xmin*xs
yo = (figheight-ys*(ymax-ymin))/2. - ymin*ys
# for each set of consecutive nonempty lines in the file, draw the line segments
path = None
for line in arch.opentext(gridfile):
coords = line.split()
if len(coords)==0 and path != None:
fig.drawPath(path)
path = None
if len(coords)==2:
x, y = xo+xs*float(coords[0]), yo+ys*float(coords[1])
if path == None:
path = fig.beginPath()
path.moveTo(x,y)
else:
path.lineTo(x,y)
else:
# --- 3D format ---
# determine the extent of the grid being plotted (largest half-width in all directions)
extent = 0.
for line in arch.opentext(gridfile):
coords = line.split()
if len(coords) == 3:
                    extent = max(extent, np.max(np.abs(np.array(list(map(float, coords))))))
# determine the scale and offsets to fit and center the grid in the figure
s = min(figwidth, figheight) * 0.95 / (2*extent) / np.sqrt(3)
xo = extent*s + (figwidth-s*(2*extent))/2.
yo = extent*s + (figheight-s*(2*extent))/2.
# set the viewing angles
inclination, azimuth = 65, 40
costheta = np.cos(inclination*np.pi/180.)
sintheta = np.sin(inclination*np.pi/180.)
cosphi = np.cos(azimuth*np.pi/180.)
sinphi = np.sin(azimuth*np.pi/180.)
# for each set of consecutive nonempty lines in the file, draw the line segments
path = None
for line in arch.opentext(gridfile):
coords = line.split()
if len(coords)==0 and path != None:
fig.drawPath(path)
path = None
if len(coords)==3:
x, y, z = map(float,coords)
xp = - sinphi*x + cosphi*y
yp = - cosphi*costheta*x - sinphi*costheta*y + sintheta*z
xf = xo+s*xp
yf = yo+s*yp
if path == None:
path = fig.beginPath()
path.moveTo(xf,yf)
else:
path.lineTo(xf,yf)
# save the figure
fig.showPage()
fig.save()
if not silent: print("Created PDF grid plot file " + plotfile)
# -----------------------------------------------------------------
| mit |
fyffyt/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places; these
# values are for the sklearn version.
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
kreczko/rootpy | setup.py | 1 | 4867 | #!/usr/bin/env python
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
import sys
# check Python version
if sys.version_info < (2, 6):
sys.exit("rootpy only supports python 2.6 and above")
# check that ROOT can be imported
try:
import ROOT
except ImportError:
sys.exit("ROOT cannot be imported. Is ROOT installed with PyROOT enabled?")
ROOT.PyConfig.IgnoreCommandLineOptions = True
# check that we have at least the minimum required version of ROOT
if ROOT.gROOT.GetVersionInt() < 52800:
sys.exit("rootpy requires at least ROOT 5.28/00; "
"You have ROOT {0}.".format(ROOT.gROOT.GetVersion()))
import os
# Prevent distutils from trying to create hard links
# which are not allowed on AFS between directories.
# This is a hack to force copying.
try:
del os.link
except AttributeError:
pass
try:
import setuptools
from pkg_resources import parse_version, get_distribution
# check that we have setuptools after the merge with distribute
setuptools_dist = get_distribution('setuptools')
if setuptools_dist.parsed_version < parse_version('0.7'):
raise ImportError(
"setuptools {0} is currently installed".format(
setuptools_dist.version))
except ImportError as ex:
sys.exit(
"{0}\n\n"
"rootpy requires that at least setuptools 0.7 is installed:\n\n"
"wget https://bootstrap.pypa.io/ez_setup.py\n"
"python ez_setup.py --user\n\n"
"You might need to add the --insecure option to the last command above "
"if using an old version of wget.\n\n"
"If you previously had distribute installed, "
"you might need to manually uninstall the distribute-patched "
"setuptools before upgrading your setuptools. "
"See https://pypi.python.org/pypi/setuptools "
"for further details.".format(ex))
from setuptools import setup, find_packages
from glob import glob
from os.path import join, abspath, dirname, isfile, isdir
local_path = dirname(abspath(__file__))
# setup.py can be called from outside the rootpy directory
os.chdir(local_path)
sys.path.insert(0, local_path)
# check for custom args
# we should instead extend distutils...
filtered_args = []
release = False
devscripts = False
for arg in sys.argv:
if arg == '--release':
# --release sets the version number before installing
release = True
elif arg == '--devscripts':
devscripts = True
else:
filtered_args.append(arg)
sys.argv = filtered_args
if release:
# remove dev from version in rootpy/info.py
import shutil
shutil.move('rootpy/info.py', 'info.tmp')
dev_info = ''.join(open('info.tmp', 'r').readlines())
open('rootpy/info.py', 'w').write(
dev_info.replace('.dev0', ''))
exclude = []
elif sys.version_info < (3, 0):
exclude = ['*.byteplay3']
else:
exclude = ['*.byteplay']
exec(open('rootpy/info.py').read())
if 'install' in sys.argv:
print(__doc__)
scripts = glob('scripts/*')
if __version__ == 'dev' and devscripts:
scripts.extend(glob('devscripts/*'))
def strip_comments(l):
return l.split('#', 1)[0].strip()
def reqs(*f):
return list(filter(None, [strip_comments(l) for l in open(
join(os.getcwd(), 'requirements', *f)).readlines()]))
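# Illustrative note (not part of the original build script): given a
# requirements file containing a line such as
#
#   numpy>=1.6 # needed by the array bindings
#
# strip_comments() reduces it to "numpy>=1.6", and reqs() filters out lines
# that become empty, so blank or comment-only lines never reach the
# extras_require lists passed to setup() below.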
setup(
name='rootpy',
version=__version__,
description="A pythonic layer on top of the "
"ROOT framework's PyROOT bindings.",
long_description=''.join(open('README.rst').readlines()[7:]),
author='the rootpy developers',
author_email='rootpy-dev@googlegroups.com',
maintainer='Noel Dawe',
maintainer_email='noel@dawe.me',
license='GPLv3',
url=__url__,
download_url=__download_url__,
packages=find_packages(exclude=exclude),
extras_require={
'tables': reqs('tables.txt'),
'array': reqs('array.txt'),
'matplotlib': reqs('matplotlib.txt'),
'roosh': reqs('roosh.txt'),
'stats': reqs('stats.txt'),
},
scripts=scripts,
entry_points={
'console_scripts': [
'root2hdf5 = rootpy.root2hdf5:main',
'roosh = rootpy.roosh:main',
]
},
package_data={'': [
'etc/*',
'testdata/*.root',
'testdata/*.txt',
'tests/test_compiled.cxx',
]},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Utilities',
'Operating System :: POSIX :: Linux',
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)'
])
if release:
# revert rootpy/info.py
shutil.move('info.tmp', 'rootpy/info.py')
| gpl-3.0 |
samuroi/SamuROI | samuroi/gui/widgets/traceview.py | 1 | 5505 | import numpy
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QDockWidget, QWidget, QHBoxLayout
from .canvasbase import CanvasBase
class TraceViewCanvas(CanvasBase):
"""Plot a set of traces for a selection defined by a QtSelectionModel"""
def __init__(self, segmentation, selectionmodel):
# initialize the canvas where the Figure renders into
super(TraceViewCanvas, self).__init__()
self.segmentation = segmentation
self.selectionmodel = selectionmodel
self.axes.set_xlim(0, self.segmentation.data.shape[-1])
self.axes.autoscale(False, axis='x')
self.figure.set_tight_layout(True)
# a dictionary mapping from mask to all matplotlib line artist
self.__artist = {}
# a dictionary mapping from mask to trace line artist
self.__traces = {}
self.segmentation.active_frame_changed.append(self.on_active_frame_change)
# connect to selection model
self.selectionmodel.selectionChanged.connect(self.on_selection_changed)
self.segmentation.overlay_changed.append(self.update_traces)
self.segmentation.data_changed.append(self.update_traces)
self.segmentation.postprocessor_changed.append(self.update_traces)
self.mpl_connect('button_press_event', self.onclick)
def get_artist(self, mask):
count = sum(1 for artist in self.axes.artists if artist.mask is mask)
if count != 1:
raise Exception("Count = " + str(count))
# find the artist associated with the mask
return next(artist for artist in self.axes.artists if artist.mask is mask)
def update_traces(self):
tmax = self.segmentation.data.shape[-1]
x = numpy.linspace(0, tmax, tmax, False, dtype=int)
for mask, line in self.__traces.items():
tracedata = self.segmentation.postprocessor(mask(self.segmentation.data, self.segmentation.overlay))
line.set_data(x, tracedata)
self.axes.relim()
self.axes.autoscale_view(scalex=False)
self.draw()
def on_mask_change(self, modified_mask):
self.update_traces()
def on_selection_changed(self, selected, deselected):
for range in deselected:
for index in range.indexes():
item = index.internalPointer()
# the selection could also be a whole tree of e.g. BranchMasks
if item.mask is not None and item.mask in self.__artist:
# disconnect from the artist change slot
if hasattr(item.mask, "changed"):
item.mask.changed.remove(self.on_mask_change)
# remove the artist
for artist in self.__artist[item.mask]:
artist.remove()
del self.__artist[item.mask]
del self.__traces[item.mask]
from itertools import cycle
cycol = cycle('bgrcmk').__next__
for range in selected:
for index in range.indexes():
item = index.internalPointer()
if item.mask is not None and item.mask not in self.__artist:
# connect to the masks changed slot
if (hasattr(item.mask, "changed")):
item.mask.changed.append(self.on_mask_change)
artists = []
if not hasattr(item.mask, "color"):
item.mask.color = cycol()
tracedata = self.segmentation.postprocessor(
item.mask(self.segmentation.data, self.segmentation.overlay))
line, = self.axes.plot(tracedata, color=item.mask.color)
self.__traces[item.mask] = line
# put a handle of the mask on the artist
line.mask = item.mask
artists.append(line)
if hasattr(item.mask, "events"):
for x in item.mask.events.indices:
line = self.axes.axvline(x=x - len(item.mask.events.kernel) / 2, c=item.mask.color, lw=2)
artists.append(line)
self.__artist[item.mask] = artists
self.draw()
def on_active_frame_change(self):
if hasattr(self, "active_frame_line"):
self.active_frame_line.remove()
self.active_frame_line = self.axes.axvline(x=self.segmentation.active_frame, color='black', lw=1.)
self.draw()
def onclick(self, event):
if event.xdata is not None:
self.segmentation.active_frame = event.xdata
class TraceViewDockWidget(QDockWidget):
def __init__(self, name, parent, segmentation, selectionmodel):
super(TraceViewDockWidget, self).__init__(name, parent)
self.canvas = TraceViewCanvas(segmentation=segmentation, selectionmodel=selectionmodel)
from PyQt5 import QtCore
        # use the Qt5 Agg backend to match the PyQt5 imports used above
        from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT
self.toolbar_navigation = NavigationToolbar2QT(self.canvas, self, coordinates=False)
self.toolbar_navigation.setOrientation(QtCore.Qt.Vertical)
self.toolbar_navigation.setFloatable(True)
self.widget = QWidget()
self.layout = QHBoxLayout()
self.layout.addWidget(self.toolbar_navigation)
self.layout.addWidget(self.canvas)
self.widget.setLayout(self.layout)
self.setWidget(self.widget)
| mit |
zooniverse/aggregation | experimental/algorithms/serengeti_2.py | 2 | 3778 | #!/usr/bin/env python
__author__ = 'greg'
import pymongo
import matplotlib.pyplot as plt
import numpy
import math
import random
import csv
import scipy.stats as stats
# import numpy as np
import pylab as pl
import scipy.special as ss
from scipy.stats import beta as beta_func
# load subject data from CSV
subjects_index = {}
with open('/home/greg/Documents/subject_species_all.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
subjects_index[row[1]] = row[2]
# connect to the mongo server
client = pymongo.MongoClient()
db = client['serengeti_2015-06-27']
classification_collection = db["serengeti_classifications"]
subject_collection = db["serengeti_subjects"]
user_collection = db["serengeti_users"]
# for storing the time stamp of the last classification made by each user
current_sessions = {}
# how many subjects each user has classified during the current session
session_length = {}
# how many blank each user has classified during the current session
num_blanks = {}
# X - percentage of blanks per session
X = []
# Y - session length
Y = []
max_classifications = 0
total_blanks = 0
total = 0.
# scan through *ALL* classifications (add a .skip or .limit to look at subset)
#{"created_at":{"$gte":datetime.datetime(2012,12,10)}}
for ii, classification in enumerate(classification_collection.find().skip(0).limit(4000000)):
if ii % 10000 == 0:
print ii
# use the ip address to identify the user - that way we track non-logged in users as well
if "user_name" not in classification:
continue
#print classification["user_name"]
# id = classification["user_ip"]
id_ = classification["user_name"]
# what time was the classification made at?
time = classification["updated_at"]
# skip tutorial classifications
if "tutorial" in classification:
continue
total += 1
# get the id for the subject and find out whether it was retired as a blank
    zooniverse_id = classification["subjects"][0]["zooniverse_id"]
# you can include blank consensus as well but I found those to be pretty error prone
    # subject = subject_collection.find_one({"zooniverse_id": zooniverse_id})
#if subject["metadata"]["retire_reason"] in ["blank"]:
blank_classification = False
for ann in classification["annotations"]:
if "nothing" in ann:
blank_classification = True
    if subjects_index[zooniverse_id] == "blank":
try:
num_blanks[id_].append(0)
except KeyError:
num_blanks[id_] = [0]
else:
if blank_classification:
try:
num_blanks[id_].append(1)
except KeyError:
num_blanks[id_] = [1]
else:
try:
num_blanks[id_].append(0)
except KeyError:
num_blanks[id_] = [0]
increase = 0
decrease = 0
seg = 4
for id_ in num_blanks:
l = len(num_blanks[id_])
n = num_blanks[id_]
quarter = l/seg
if quarter == 0:
continue
x = sum(n[:quarter+1])/float(quarter)
y = sum(n[-quarter:])/float(quarter)
X.append(x)
Y.append(y)
if x> y:
decrease += 1
if x<y:
increase += 1
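# Worked illustration of the bookkeeping above (made-up user, not real data):
# for n = [1, 1, 0, 0, 0, 0, 0, 0] we get l = 8 and quarter = 2, so
# x = sum(n[:3]) / 2.0 = 1.0 and y = sum(n[-2:]) / 2.0 = 0.0; the rate of
# wrongly marking non-blank subjects as blank drops over this user's history,
# so x > y and the user counts towards "decrease".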
print increase
print decrease
meanX = numpy.mean(X)
meanY = numpy.mean(Y)
medianX = numpy.median(X)
medianY = numpy.mean(Y)
print meanX,meanY
plt.plot(X,Y,'.',color="blue")
# plt.plot([meanX,meanX],[0,1],"--",color="green")
# plt.plot([0,1],[meanY,meanY],"--",color="green")
#
# plt.plot([medianX,medianX],[0,1],"--",color="green")
# plt.plot([0,1],[medianY,medianY],"--",color="green")
plt.xlabel("first quarter classifications")
plt.ylabel("last quarter classifications")
plt.xlim((0,1))
plt.ylim((0,1))
plt.show() | apache-2.0 |
bavardage/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 3 | 3212 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print rcusum
print rresid_scaled[skip-1:]
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
print 'cusum test reject:'
print ((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any()
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print H
print crit95
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print supb, pval, crit
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
| bsd-3-clause |
xuewei4d/scikit-learn | examples/miscellaneous/plot_johnson_lindenstrauss_bound.py | 14 | 7785 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\
Johnson%E2%80%93Lindenstrauss_lemma
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils.fixes import parse_version
# `normed` is being deprecated in favor of `density` in histograms
if parse_version(matplotlib.__version__) >= parse_version('2.1'):
density_param = {'density': True}
else:
density_param = {'normed': True}
# %%
# Theoretical bounds
# ==================
# The distortion introduced by a random projection `p` is controlled by
# the fact that `p` defines an eps-embedding with good probability,
# as defined by:
#
# .. math::
# (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
#
# Where u and v are any rows taken from a dataset of shape (n_samples,
# n_features) and p is a projection by a random Gaussian N(0, 1) matrix
# of shape (n_components, n_features) (or a sparse Achlioptas matrix).
#
# The minimum number of components needed to guarantee the eps-embedding is
# given by:
#
# .. math::
# n\_components \geq 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
#
#
# The first plot shows that with an increasing number of samples ``n_samples``,
# the minimal number of dimensions ``n_components`` increased logarithmically
# in order to guarantee an ``eps``-embedding.
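# A quick numerical illustration (not part of the original example): for
# n_samples = 1000 and eps = 0.1 the bound evaluates to
#
#   4 * log(1000) / (0.1 ** 2 / 2 - 0.1 ** 3 / 3) ~ 5920
#
# which is the value johnson_lindenstrauss_min_dim(1000, eps=0.1) is expected
# to return: even a modest 10% distortion budget already requires a few
# thousand dimensions.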
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
plt.show()
# %%
# The second plot shows that an increase of the admissible
# distortion ``eps`` allows to reduce drastically the minimal number of
# dimensions ``n_components`` for a given number of samples ``n_samples``
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
plt.show()
# %%
# Empirical validation
# ====================
#
# We validate the above bounds on the 20 newsgroups text document
# (TF-IDF word frequencies) dataset or on the digits dataset:
#
# - for the 20 newsgroups dataset some 500 documents with 100k
# features in total are projected using a sparse random matrix to smaller
# euclidean spaces with various values for the target number of dimensions
# ``n_components``.
#
# - for the digits dataset, some 8x8 gray level pixels data for 500
# handwritten digits pictures are randomly projected to spaces for various
# larger number of dimensions ``n_components``.
#
# The default dataset is the 20 newsgroups dataset. To run the example on the
# digits dataset, pass the ``--use-digits-dataset`` command line argument to
# this script.
if '--use-digits-dataset' in sys.argv:
data = load_digits().data[:500]
else:
data = fetch_20newsgroups_vectorized().data[:500]
# %%
# For each value of ``n_components``, we plot:
#
# - 2D distribution of sample pairs with pairwise distances in original
# and projected spaces as x and y axis respectively.
#
# - 1D histogram of the ratio of those distances (projected / original).
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
min_dist = min(projected_dists.min(), dists.min())
max_dist = max(projected_dists.max(), dists.max())
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu,
extent=[min_dist, max_dist, min_dist, max_dist])
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, range=(0., 2.), edgecolor='k', **density_param)
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
# %%
# We can see that for low values of ``n_components`` the distribution is wide
# with many distorted pairs and a skewed distribution (due to the hard
# limit of zero ratio on the left as distances are always positives)
# while for larger values of n_components the distortion is controlled
# and the distances are well preserved by the random projection.
# %%
# Remarks
# =======
#
# According to the JL lemma, projecting 500 samples without too much distortion
# will require at least several thousands dimensions, irrespective of the
# number of features of the original dataset.
#
# Hence using random projections on the digits dataset which only has 64
# features in the input space does not make sense: it does not allow
# for dimensionality reduction in this case.
#
# On the twenty newsgroups on the other hand the dimensionality can be
# decreased from 56436 down to 10000 while reasonably preserving
# pairwise distances.
| bsd-3-clause |
MalkIPP/ipp_work | ipp_work/simulations/charges_deductibles.py | 1 | 3436 | # -*- coding: utf-8 -*-
"""
Created on Tue May 5 11:54:08 2015
@author: malkaguillot
"""
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas
from openfisca_france_data.input_data_builders import get_input_data_frame
from openfisca_france_data.surveys import SurveyScenario
from ipp_work.utils import survey_simulate
def test_weights_building():
year = 2009
input_data_frame = get_input_data_frame(year)
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
used_as_input_variables = ['sal', 'cho', 'rst', 'age_en_mois'],
year = year,
)
survey_scenario.new_simulation()
return survey_scenario.simulation
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
year = 2009
ind_variables = ['idmen', 'quimen', 'idfoy', 'quifoy', 'idfam', 'quifam', 'age', 'champm_individus',
'salaire_imposable', 'salaire_net', 'txtppb']
foy_variables = ['irpp', 'cd_grorep', 'cd_pension_alimentaire', 'cd_acc75a', 'cd_deddiv', 'cd_eparet', 'rfr',
'decile_rfr', 'weight_foyers', 'idfoy_original']
used_as_input_variables = ['sal', 'cho', 'rst', 'age_en_mois', 'smic55']
data_frame_by_entity_key_plural, simulation = survey_simulate(used_as_input_variables, year, ind_variables,
foy_variables = foy_variables)
data_frame_individus = data_frame_by_entity_key_plural['individus']
data_frame_foyers = data_frame_by_entity_key_plural['foyers']
# (data_frame_familles.weight_familles * data_frame_familles.af).sum() / 1e9 > 10
import numpy
pension_alim_not_null = data_frame_foyers.cd_pension_alimentaire[data_frame_foyers.cd_pension_alimentaire != 0]
print data_frame_foyers.cd_pension_alimentaire.describe()
print pension_alim_not_null.describe()
epargne_retraite_not_null = data_frame_foyers.cd_eparet[data_frame_foyers.cd_eparet != 0]
print data_frame_foyers.cd_eparet.describe()
print epargne_retraite_not_null.describe()
    import matplotlib.pyplot as plt
    # histogram of the non-null 'pension alimentaire' deductions
    plt.hist(pension_alim_not_null.values)
cd_pension_alimentaire2 = data_frame_foyers[
data_frame_foyers.cd_pension_alimentaire < 6000] \
.cd_pension_alimentaire.values
# the histogram of the data
n, bins, patches = plt.hist(cd_pension_alimentaire2[cd_pension_alimentaire2 >0], 20)
| agpl-3.0 |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/artist.py | 69 | 33042 | from __future__ import division
import re, warnings
import matplotlib
import matplotlib.cbook as cbook
from transforms import Bbox, IdentityTransform, TransformedBbox, TransformedPath
from path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
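# For instance (illustrative usage, not part of this module), assuming
# ``import matplotlib.pyplot as plt`` and ``import matplotlib.artist as martist``:
#
#   line, = plt.plot([1, 2, 3])
#   martist.getp(line, 'linewidth')    # query a property through its getter
#   martist.setp(line, linewidth=2.0)  # set it; the legal values are taken
#                                      # from the ACCEPTS line of set_linewidth
#
# which relies on the docstring convention described above.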
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = 1.0
self.clipbox = None
self._clippath = None
self._clipon = True
self._lod = False
self._label = ''
self._picker = None
self._contains = None
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self.axes = None
self._remove_method = None
self._url = None
self.x_isdata = True # False to avoid updating Axes.dataLim with x
self.y_isdata = True # with y
self._snap = None
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should set
# the _remove_method attribute directly. This would be a protected
# attribute if Python supported that sort of thing. The callback
# has one parameter, which is the child to be removed.
if self._remove_method != None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property
# of whether or not the artist should affect the limits. Then there
# will be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None: return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
import traceback
L = []
try:
hascursor,info = self.contains(event)
if hascursor:
L.append(self)
except:
traceback.print_exc()
print "while checking",self.__class__
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains): return self._contains(self,mouseevent)
#raise NotImplementedError,str(self.__class__)+" needs 'contains' method"
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False,{}
def set_contains(self,picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside,prop = picker(self,mouseevent)
else:
inside,prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if it's data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g. the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
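    # Illustrative sketch (not part of the original source): a callable picker
    # following the contract documented above could look like
    #
    #   def left_half_picker(artist, mouseevent):
    #       # "hit" whenever the click lands in the left half of the data range
    #       hit = mouseevent.xdata is not None and mouseevent.xdata < 0.5
    #       return hit, dict(xclick=mouseevent.xdata)
    #
    #   line.set_picker(left_half_picker)
    #
    # where the returned dict is merged into the attributes of the PickEvent.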
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
"""
self._url = url
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
return self._snap
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
self._snap = snap
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
if not success:
print type(path), type(transform)
raise TypeError("Invalid arguments to set_clip_path")
self.pchanged()
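    # Illustrative usage sketch (assumed, not from this module): clipping an
    # image to a circular patch typically looks like
    #
    #   import matplotlib.patches as mpatches
    #   clip = mpatches.Circle((0.5, 0.5), 0.4, transform=ax.transAxes)
    #   im = ax.imshow(data)
    #   im.set_clip_path(clip)
    #
    # which exercises the Patch branch above; passing a bare Path instead
    # also requires passing the transform that should be applied to it.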
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible(): return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *prop*.
"""
store = self.eventson
self.eventson = False
changed = False
for k,v in props.items():
func = getattr(self, 'set_'+k, None)
if func is None or not callable(func):
raise AttributeError('Unknown property %s'%k)
func(v)
changed = True
self.eventson = store
if changed: self.pchanged()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self.pchanged()
def set(self, **kwargs):
"""
A tkstyle set command, pass *kwargs* to set properties
"""
ret = []
for k,v in kwargs.items():
k = k.lower()
funcName = "set_%s"%k
func = getattr(self,funcName)
ret.extend( [func(v)] )
return ret
def findobj(self, match=None):
"""
pyplot signature:
findobj(o=gcf(), match=None)
Recursively find all :class:matplotlib.artist.Artist instances
contained in self.
*match* can be
- None: return all objects contained in artist (including artist)
- function with signature ``boolean = match(artist)`` used to filter matches
- class instance: eg Line2D. Only return artists of class type
.. plot:: mpl_examples/pylab_examples/findobj_demo.py
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
raise ValueError('match must be None, an matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
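    # Illustrative usage (not part of the original source): to collect every
    # Line2D instance contained in a figure one could write
    #
    #   import matplotlib.lines as mlines
    #   lines = fig.findobj(mlines.Line2D)
    #
    # or pass a predicate instead of a class, e.g.
    #
    #   labelled = fig.findobj(lambda a: a.get_label() != '')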
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
    and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of
:class:`Artists`. If a sequence is used, we assume it is a
homogeneous sequence (all :class:`Artists` are of the same
type) and it is your responsibility to make sure this is so.
"""
if cbook.iterable(o) and len(o): o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
Eg., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and callable(getattr(self.o,name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func): continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
Eg., for a line linestyle, return
[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
"""
name = 'set_%s'%attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s'%(self.o,name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None: return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'): continue
o = getattr(self.o, name)
if not callable(o): continue
func = o
if self.is_alias(func): continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. Eg., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None: return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target) for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '='*col0_len + ' ' + '='*col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len+3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len+3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and callable(getattr(o, name))]
#print getters
getters.sort()
lines = []
for name in getters:
func = getattr(o, name)
if self.is_alias(func): continue
try: val = func()
except: continue
if getattr(val, 'shape', ()) != () and len(val)>6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s)>50:
s = s[:50] + '...'
name = self.aliased_name(name[4:])
lines.append(' %s = %s' %(name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: eg :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x): return True
        elif isinstance(match, type) and issubclass(match, Artist):
            def matchfunc(x):
                return isinstance(x, match)
        elif callable(match):
            matchfunc = match
        else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(o, property=None):
"""
Return the value of handle property. property is an optional string
for the property you want to return
Example usage::
getp(o) # get all the object properties
getp(o, 'linestyle') # get the linestyle property
*o* is a :class:`Artist` instance, eg
    :class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
o.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(o)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
insp = ArtistInspector(o)
if property is None:
ret = insp.pprint_getters()
print '\n'.join(ret)
return
func = getattr(o, 'get_' + property)
return func()
# alias
get = getp
def setp(h, *args, **kwargs):
"""
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. E.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the matlab(TM) style string/value pairs or
with python kwargs. For example, the following are equivalent::
    >>> setp(lines, 'linewidth', 2, 'color', 'r') # matlab style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(h)
if len(kwargs)==0 and len(args)==0:
print '\n'.join(insp.pprint_setters())
return
if len(kwargs)==0 and len(args)==1:
print insp.pprint_setters(prop=args[0])
return
if not cbook.iterable(h): h = [h]
else: h = cbook.flatten(h)
if len(args)%2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args)-1, 2):
funcvals.append((args[i], args[i+1]))
funcvals.extend(kwargs.items())
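    # apply every (property, value) pair to every artist in turn,
    # collecting the return values of the individual set_* calls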
ret = []
for o in h:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s"%s
func = getattr(o,funcName)
ret.extend( [func(val)] )
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
kwdocd = dict()
kwdocd['Artist'] = kwdoc(Artist)
| gpl-3.0 |
riddhishb/ipython-notebooks | Poisson Editing/seamlesscloning.py | 1 | 1270 | import cv2
import numpy as np
import matplotlib.pyplot as plt
im_cloned = cv2.imread("stinkbug_cloned.png", cv2.IMREAD_GRAYSCALE)
im_mask = cv2.imread("stinkbug_cloned_mask.png", cv2.IMREAD_GRAYSCALE)
it = 500; # Set number of iterations
im_temp = im_cloned.copy()
im_seamless = im_temp.copy()
sigma = []
for i in range(im_cloned.shape[0]):
for j in range(im_cloned.shape[1]):
if (im_mask[i,j]==255):
sigma.append([i,j])
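# Jacobi-style iteration of the discrete Poisson equation: on every pass each
# masked pixel is recomputed as
#   (sum of its four neighbours in the current estimate
#    + sum of (cloned[p] - cloned[q]) over neighbours q inside the mask) / 4,
# so gradients inside the mask follow the cloned source while boundary values
# come from the surrounding image.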
for a in range(it):
for [i,j] in sigma:
term = 10000
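        # the accumulator starts at 10000 (removed again below) so the
        # neighbour sums are carried out in a wider integer type instead of
        # wrapping around in uint8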
term = term + im_seamless[i+1, j]+im_seamless[i-1, j]+im_seamless[i, j-1]+im_seamless[i, j+1]
if(im_mask[i-1, j]==255):
term = term + im_cloned[i,j]-im_cloned[i-1,j]
if(im_mask[i+1, j]==255):
term = term + im_cloned[i,j]-im_cloned[i+1,j]
if(im_mask[i, j+1]==255):
term = term + im_cloned[i,j]-im_cloned[i,j+1]
if(im_mask[i, j-1]==255):
term = term + im_cloned[i,j]-im_cloned[i,j-1]
im_temp[i,j] = (term-10000)/4
im_seamless = im_temp.copy()
    print(a)
fig, ax = plt.subplots(1, 2)
ax[0].imshow(im_cloned,cmap = 'gray')
ax[0].set_title('Normal Cloning')
ax[1].imshow(im_seamless, cmap='gray')
ax[1].set_title('Seamless Cloning')
plt.show()
| gpl-3.0 |
nettorta/yandex-tank | yandextank/plugins/Phantom/tests/test_reader.py | 2 | 2081 | from threading import Event
from functools import reduce
import pandas as pd
from yandextank.common.util import FileMultiReader
from yandextank.plugins.Phantom.reader import PhantomReader, PhantomStatsReader, string_to_df_microsec
class TestPhantomReader(object):
def setup_class(self):
stop = Event()
self.multireader = FileMultiReader('yandextank/plugins/Phantom/tests/phout.dat', stop)
stop.set()
def teardown_class(self):
self.multireader.close()
def test_read_all(self):
reader = PhantomReader(
self.multireader.get_file(), cache_size=1024)
df = pd.DataFrame()
for chunk in reader:
df = df.append(chunk)
assert (len(df) == 200)
assert (df['interval_real'].mean() == 11000714.0)
def test_reader_closed(self):
reader = PhantomReader(self.multireader.get_file(), cache_size=64)
frames = [i for i in reader]
result = pd.concat(frames)
assert len(result) == 200
assert (result['interval_real'].mean() == 11000714.0)
def test_reader_us(self):
with open('yandextank/plugins/Phantom/tests/phout.dat') as f:
chunk = f.read()
result = string_to_df_microsec(chunk)
expected = pd.read_pickle('yandextank/plugins/Phantom/tests/expected_df.dat')
result['ts'] -= result['ts'][0]
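        # timestamps are shifted to be relative to the first sample before
        # comparing against the pickled expected frame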
assert result.equals(expected)
class MockInfo(object):
def __init__(self, steps):
self.steps = steps
class TestStatsReader(object):
def test_closed(self):
STEPS = [[1.0, 1], [1.0, 1], [1.0, 1], [2.0, 1], [2.0, 1], [2.0, 1], [2.0, 1], [2.0, 1], [3.0, 1], [3.0, 1],
[3.0, 1], [3.0, 1], [3.0, 1], [4.0, 1], [4.0, 1], [4.0, 1], [4.0, 1], [4.0, 1], [5.0, 1], [5.0, 1],
[5.0, 1]]
reader = PhantomStatsReader('yandextank/plugins/Phantom/tests/phantom_stat.dat',
MockInfo(STEPS), cache_size=1024 * 10)
reader.close()
stats = reduce(lambda l1, l2: l1 + l2, [i for i in reader])
assert len(stats) == 19
| lgpl-2.1 |
jmargeta/scikit-learn | sklearn/tests/test_preprocessing.py | 3 | 25863 | import warnings
import numpy as np
import numpy.linalg as la
import scipy.sparse as sp
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.sparsefuncs import mean_variance_axis0
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import add_dummy_feature
from sklearn.preprocessing import balance_weights
from sklearn import datasets
from sklearn.linear_model.stochastic_gradient import SGDClassifier
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_scaler_1d():
"""Test scaling of dataset along single axis"""
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
def test_scaler_2d_arrays():
"""Test scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
"""Check min max scaler on toy data with zero variance features"""
X = [[0., 1., 0.5],
[0., 1., -0.1],
[0., 1., 1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
"""Check that StandardScaler.fit does not change input"""
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sp.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sp.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sp.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_warning_scaling_integers():
"""Check warning when scaling integer data"""
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
with warnings.catch_warnings(record=True) as w:
StandardScaler().fit(X)
assert_equal(len(w), 1)
with warnings.catch_warnings(record=True) as w:
MinMaxScaler().fit(X)
assert_equal(len(w), 1)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sp.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sp.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sp.coo_matrix, sp.csc_matrix, sp.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sp.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, 0]])
for init in (np.array, sp.csr_matrix, sp.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(type(X), type(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
def test_label_binarizer():
lb = LabelBinarizer()
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# two-class case
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 2, 2, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_multilabel():
lb = LabelBinarizer()
# test input as lists of tuples
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
got = lb.fit_transform(inp)
assert_array_equal(indicator_mat, got)
assert_equal(lb.inverse_transform(got), inp)
# test input as label indicator matrix
lb.fit(indicator_mat)
assert_array_equal(indicator_mat,
lb.inverse_transform(indicator_mat))
# regression test for the two-class multilabel case
lb = LabelBinarizer()
inp = [[1, 0], [0], [1], [0, 1]]
expected = np.array([[1, 1],
[1, 0],
[0, 1],
[1, 1]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_equal([set(x) for x in lb.inverse_transform(got)],
[set(x) for x in inp])
def test_label_binarizer_errors():
"""Check that invalid arguments yield ValueError"""
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
def test_one_hot_encoder():
"""Test OneHotEncoder's fit and transform."""
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raise when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_label_encoder():
"""Test LabelEncoder's transform and inverse_transform methods"""
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
"""Test fit_transform"""
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_string_labels():
"""Test LabelEncoder's transform and inverse_transform methods with
non-numeric labels"""
le = LabelEncoder()
le.fit(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(le.classes_, ["amsterdam", "paris", "tokyo"])
assert_array_equal(le.transform(["tokyo", "tokyo", "paris"]),
[2, 2, 1])
assert_array_equal(le.inverse_transform([2, 2, 1]),
["tokyo", "tokyo", "paris"])
assert_raises(ValueError, le.transform, ["london"])
def test_label_encoder_errors():
"""Check that invalid arguments yield ValueError"""
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_label_binarizer_iris():
lb = LabelBinarizer()
Y = lb.fit_transform(iris.target)
clfs = [SGDClassifier().fit(iris.data, Y[:, k])
for k in range(len(lb.classes_))]
Y_pred = np.array([clf.decision_function(iris.data) for clf in clfs]).T
y_pred = lb.inverse_transform(Y_pred)
accuracy = np.mean(iris.target == y_pred)
y_pred2 = SGDClassifier().fit(iris.data, iris.target).predict(iris.data)
accuracy2 = np.mean(iris.target == y_pred2)
assert_almost_equal(accuracy, accuracy2)
def test_label_binarizer_multilabel_unlabeled():
"""Check that LabelBinarizer can handle an unlabeled sample"""
lb = LabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(lb.fit_transform(y), Y)
def test_center_kernel():
"""Test that KernelCenterer is equivalent to StandardScaler
in feature space"""
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sp.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sp.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sp.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sp.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sp.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sp.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_balance_weights():
weights = balance_weights([0, 0, 1, 1])
assert_array_equal(weights, [1., 1., 1., 1.])
weights = balance_weights([0, 1, 1, 1, 1])
assert_array_equal(weights, [1., 0.25, 0.25, 0.25, 0.25])
weights = balance_weights([0, 0])
assert_array_equal(weights, [1., 1.])
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter5/fig_posterior_gaussgauss.py | 3 | 4803 | """
Gaussian Distribution with Gaussian Errors
------------------------------------------
Figure 5.8
The solid lines show marginalized posterior pdfs for :math:`\mu` (left) and
:math:`\sigma` (right) for a Gaussian distribution with heteroscedastic
Gaussian measurement errors (i.e., integrals over :math:`\sigma` and
:math:`\mu` for the two-dimensional distribution shown in figure 5.7). For
comparison, the dashed histograms show the distributions of approximate
estimates for :math:`\mu` and :math:`\sigma` (the median and the estimator given by eq. 5.68,
respectively) for 10,000 bootstrap resamples of the same data set. The true
values of :math:`\mu` and :math:`\sigma` are indicated by the vertical dotted
lines.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.stats import median_sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def gaussgauss_logL(xi, ei, mu, sigma):
"""Equation 5.63: gaussian likelihood with gaussian errors"""
ndim = len(np.broadcast(sigma, mu).shape)
xi = xi.reshape(xi.shape + tuple(ndim * [1]))
ei = ei.reshape(ei.shape + tuple(ndim * [1]))
s2_e2 = sigma ** 2 + ei ** 2
return -0.5 * np.sum(np.log(s2_e2) + (xi - mu) ** 2 / s2_e2,
-1 - ndim)
def approximate_mu_sigma(xi, ei, axis=None):
"""Estimates of mu0 and sigma0 via equations 5.67 - 5.68"""
if axis is not None:
xi = np.rollaxis(xi, axis)
ei = np.rollaxis(ei, axis)
axis = 0
mu_approx, sigmaG = median_sigmaG(xi, axis=axis)
e50 = np.median(ei, axis=axis)
var_twiddle = (sigmaG ** 2 + ei ** 2 - e50 ** 2)
sigma_twiddle = np.sqrt(np.maximum(0, var_twiddle))
med = np.median(sigma_twiddle, axis=axis)
mu = np.mean(sigma_twiddle, axis=axis)
zeta = np.ones_like(mu)
zeta[mu != 0] = med[mu != 0] / mu[mu != 0]
var_approx = zeta ** 2 * sigmaG ** 2 - e50 ** 2
sigma_approx = np.sqrt(np.maximum(0, var_approx))
return mu_approx, sigma_approx
#--------------------------------------------------
# Generate data
np.random.seed(5)
mu_true = 1.
sigma_true = 1.
N = 10
ei = 3 * np.random.random(N)
xi = np.random.normal(mu_true, np.sqrt(sigma_true ** 2 + ei ** 2))
sigma = np.linspace(0.001, 5, 70)
mu = np.linspace(-3, 5, 70)
logL = gaussgauss_logL(xi, ei, mu, sigma[:, np.newaxis])
logL -= logL.max()
L = np.exp(logL)
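# marginal posteriors: collapse the 2-d likelihood grid over the other
# parameter and normalize each curve to unit area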
p_sigma = L.sum(1)
p_sigma /= (sigma[1] - sigma[0]) * p_sigma.sum()
p_mu = L.sum(0)
p_mu /= (mu[1] - mu[0]) * p_mu.sum()
#------------------------------------------------------------
# Compute bootstrap estimates
Nbootstraps = 10000
indices = np.random.randint(0, len(xi), (len(xi), 10000))
xi_boot = xi[indices]
ei_boot = ei[indices]
mu_boot, sigma_boot = approximate_mu_sigma(xi_boot, ei_boot, 0)
#--------------------------------------------------
# Plot data
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.24,
bottom=0.15, top=0.9)
# first plot the histograms for mu
ax = fig.add_subplot(121)
# plot the marginalized distribution
ax.plot(mu, p_mu, '-k', label='marginalized')
# plot the bootstrap distribution
bins = np.linspace(-3, 5, 14)
ax.hist(mu_boot, bins, histtype='step', linestyle='dashed',
color='b', normed=True, label='approximate')
# plot vertical line: newer matplotlib versions can use ax.vlines(x)
ax.plot([mu_true, mu_true], [0, 1.0], ':k', lw=1)
ax.set_xlabel(r'$\mu$')
ax.set_ylabel(r'$p(\mu)$')
ax.set_ylim(0, 1.0)
# first plot the histograms for sigma
ax = fig.add_subplot(122)
# plot the marginalized distribution
ax.plot(sigma, p_sigma, '-k', label='marginalized')
# plot the bootstrap distribution
bins = np.linspace(0, 5, 31)
ax.hist(sigma_boot, bins, histtype='step', linestyle='dashed',
color='b', normed=True, label='approximate')
# plot vertical line: newer matplotlib versions can use ax.vlines(x)
ax.plot([sigma_true, sigma_true], [0, 1.0], ':k', lw=1)
ax.set_xlabel(r'$\sigma$')
ax.set_ylabel(r'$p(\sigma)$')
ax.legend(loc=1, prop=dict(size=8))
ax.set_xlim(0, 5.0)
ax.set_ylim(0, 1.0)
plt.show()
| bsd-2-clause |
phageParser/phageParser | populate.py | 3 | 6935 | #!/usr/bin/env python
import argparse
import os
import pickle
import pandas
import requests
from Bio import Entrez, SeqIO
from lxml import html, etree
from tqdm import tqdm
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'phageAPI.settings')
import django
django.setup()
from util.acc import read_accession_file
from util.prunedict import prune_dict
from util import fetch
from restapi.models import (
Organism,
Spacer,
Repeat,
LocusSpacerRepeat,
AntiCRISPR,
Locus
)
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def populate_organism():
def add_organism(name, accession):
# get the object, this also checks for duplicates
o, created = Organism.objects.get_or_create(
name=name, accession=accession)
return o
def merge_acc_names(accession_list):
acc_name_dict = {}
db = "nuccore"
# Doing batches of 200 to make sure requests to NCBI are not too big
for i in range(0, len(accession_list), 200):
j = i + 200
result_handle = Entrez.efetch(
db=db, rettype="gb", id=accession_list[i:j])
# Populate result per organism name
records = SeqIO.parse(result_handle, 'genbank')
for record in tqdm(records):
# Using NCBI name, which should match accession number passed
acc_name_dict[record.name] = record.annotations['organism']
return acc_name_dict
    with open(os.path.join(DATA_DIR, 'bac_accession_list.txt')) as f:
        accession_list = list(read_accession_file(f))
    # read_accession_file yields accession ids; resolve organism names via NCBI
    acc_name_dict = merge_acc_names(accession_list)
for acc in acc_name_dict:
add_organism(name=acc_name_dict[acc], accession=acc)
def get_spacer_repeat_files():
spath = os.path.join(DATA_DIR, "spacerdatabase.txt")
surl = ('http://crispr.i2bc.paris-saclay.fr/'
'crispr/BLAST/Spacer/Spacerdatabase')
rpath = os.path.join(DATA_DIR, "repeatdatabase.txt")
rurl = 'http://crispr.i2bc.paris-saclay.fr/crispr/BLAST/DR/DRdatabase'
fetch.fetch(spath, surl)
fetch.fetch(rpath, rurl)
return spath, rpath
def repeatfiletodict(rfile):
rdict = {}
repeatrecords = SeqIO.parse(rfile, 'fasta')
for record in repeatrecords:
accessions = record.name.split('|')
sequence = str(record.seq)
for acc in accessions:
rdict[acc] = {'RepeatSeq': sequence}
return rdict
def addspacerstodict(gendict, sfile):
spacerrecords = SeqIO.parse(sfile, 'fasta')
for record in spacerrecords:
accessions = record.name.split('|')
sequence = str(record.seq)
for acc in accessions:
acc_elems = acc.split('_')
order = acc_elems[-1]
acc_id = '_'.join(acc_elems[:-1])
try:
if 'Spacers' in gendict[acc_id]:
gendict[acc_id]['Spacers'][order] = sequence
else:
gendict[acc_id]['Spacers'] = {order: sequence}
except KeyError:
print('Error on accession id: %s' % acc_id)
return gendict
def addpositionstodict(gendict):
print("Downloading position information from web...")
for accidwithloc in tqdm(gendict):
if 'Start' in gendict[accidwithloc]:
continue
accid = '_'.join(accidwithloc.split('_')[:-1])
url = ('http://crispr.i2bc.paris-saclay.fr/crispr/crispr_db.php?'
'checked%5B%5D={}'.format(accid))
page = requests.get(url)
htmltable = html.fromstring(page.content).xpath(
"//table[normalize-space(@class)='primary_table']")[1]
strtable = etree.tostring(htmltable)
# converts to pandas df and then to numpy array then drop titles
arrtable = pandas.read_html(strtable)[0].as_matrix()[2:]
for row in arrtable:
if row[0] in gendict:
gendict[row[0]]['Start'] = row[2]
gendict[row[0]]['Stop'] = row[3]
else:
if row[1] != 'questionable':
print("Can't find %s in local files" % row[0])
return gendict
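# gendict maps '<accession>_<locus index>' keys to a dict holding the locus
# 'RepeatSeq', its 'Spacers' (order -> sequence) and the 'Start'/'Stop'
# positions gathered above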
def populate_fromlocus(locid, locdict):
accid = '_'.join(locid.split('_')[:-1])
organismset = Organism.objects.filter(accession=accid)
if not organismset.exists():
print('Organism with accid %s not found in db' % accid)
return
organism = organismset[0]
repeat, _ = Repeat.objects.get_or_create(sequence=locdict['RepeatSeq'])
loc_start = int(locdict['Start'])
loc_end = int(locdict['Stop'])
locus, _ = Locus.objects.get_or_create(
organism=organism,
genomic_start=loc_start,
genomic_end=loc_end
)
spacers = locdict['Spacers']
for order in sorted(spacers):
spacer, _ = Spacer.objects.get_or_create(sequence=spacers[order])
order = int(order)
lsr, _ = LocusSpacerRepeat.objects.get_or_create(
locus=locus,
spacer=spacer,
repeat=repeat,
order=order
)
spacer.save()
lsr.save()
locus.save()
repeat.save()
organism.save()
def populate_lsrpair():
print('Downloading files and gathering online data.')
sfile, rfile = get_spacer_repeat_files()
gendict = prune_dict(
addpositionstodict(
addspacerstodict(
repeatfiletodict(rfile), sfile)))
    with open('dbbackups/genedict.pickle', 'wb') as f:
pickle.dump(gendict, f, protocol=pickle.HIGHEST_PROTOCOL)
print('Created dictionary and dumped data to genedict.pickle')
print("Populating Spacer, Repeat, SpacerRepeatPair, "
"OrganismSpacerRepeatPair tables")
for locid in tqdm(gendict):
populate_fromlocus(locid, gendict[locid])
def populate_anticrispr():
with open(os.path.join(DATA_DIR, 'antiCRISPR_accessions.txt')) as f:
accession_list = list(read_accession_file(f))
print("Fetching AntiCRISPR entries")
result_handle = Entrez.efetch(
db='protein', rettype="fasta", id=accession_list)
for record in tqdm(SeqIO.parse(result_handle, 'fasta')):
spacer, _ = AntiCRISPR.objects.get_or_create(
accession=record.name,
sequence=str(record.seq))
spacer.save()
def main():
parser = argparse.ArgumentParser(
description='Populate the phageParser database with data from NCBI'
)
parser.add_argument(
'email',
nargs=1,
help=('your email address (does not need to be registered, '
'just used to identify you)')
)
args = parser.parse_args()
    Entrez.email = args.email[0]  # nargs=1 yields a single-element list
print("Starting organism population")
populate_organism()
print("Starting LSR population")
populate_lsrpair()
print("Starting AntiCRISPR population")
populate_anticrispr()
if __name__ == '__main__':
main()
| mit |
roxyboy/scikit-learn | sklearn/metrics/tests/test_common.py | 83 | 41144 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
"brier_score_loss",
"label_ranking_loss",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mixed format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mixed representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
# Ensure that classification metrics are invariant to whether the labels
# are given as strings or as numbers
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
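# Illustration (added comment, not part of the test suite): how the averaging
# modes checked above relate for precision_score on a small hand-worked
# label-indicator example.
#
#   y_true_binarize = [[1, 0], [1, 0], [0, 1]]
#   y_pred_binarize = [[1, 0], [0, 1], [0, 1]]
#
#   per-label precision : [1.0, 0.5]                   (average=None)
#   macro               : mean([1.0, 0.5])              = 0.75
#   micro               : precision on raveled arrays   = 2/3
#   weighted            : (2 * 1.0 + 1 * 0.5) / 3       = 0.8333...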
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
# check that integer weights give the same result as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that an error is raised if
# sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
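# Quick illustrative sketch (added comment, not executed by the tests):
# integer sample weights behave like sample repetition, e.g. for
# accuracy_score
#
#   >>> import numpy as np
#   >>> from sklearn.metrics import accuracy_score
#   >>> accuracy_score([0, 1], [0, 0], sample_weight=[3, 1])   # 3/4
#   0.75
#   >>> accuracy_score(np.repeat([0, 1], [3, 1]),
#   ...                np.repeat([0, 0], [3, 1]))              # same result
#   0.75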
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.shape[1] > 0:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
LingboTang/LearningFiles | load_dat.py | 1 | 7385 | import os
import sys
import re
import numpy as np
import random
import getopt
import time
import matplotlib.pyplot as plt
from scipy.interpolate import splrep, pchip, splmake, splev, spline, interp1d
import signal
# ========================================= Exception ================================================= #
class Timeout():
class Timeout(Exception):
pass
def __init__(self,sec):
self.sec = sec
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, *args):
signal.alarm(0)
def raise_timeout(self, *args):
raise Timeout.Timeout()
# ========================================= Global Parameter ========================================== #
KB = 1.98722e-3
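# Added note: KB is presumably the gas (Boltzmann) constant in kcal/(mol*K),
# so Beta(KB, temp) below has units of 1/(kcal/mol) and works/free energies
# throughout this script are in kcal/mol.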
colors = ["b","g","r","y","k"] * 2
# ========================================= Util Functions ============================================ #
def Beta(k,temp):
return 1 /(k * temp)
def get_step_distances(traj):
distances = []
for i in range(len(traj)):
if i == 0:
dis = 0
else:
dis = traj[i] - traj[0]
distances.append(dis)
return np.array(distances)
def get_delta_dis(traj):
delta_distances = []
for i in range(len(traj)):
if i == 0:
delta_dis = 0
else:
delta_dis = traj[i] - traj[i-1]
delta_distances.append(delta_dis)
return np.array(delta_distances)
def get_delta_forces(forces):
delta_Fs = []
for i in range(len(forces)):
if i == 0:
delta_F = 0
else:
delta_F = forces[i] - forces[i-1]
delta_Fs.append(delta_F)
return np.array(delta_Fs)
def get_ave_forces(forces):
ave_Fs = []
for i in range(len(forces)):
if i == 0:
ave_F = 0
else:
ave_F = (forces[i] + forces[i-1]) / 2
ave_Fs.append(ave_F)
return np.array(ave_Fs)
# FE means free energy, assume the initial FE is 0
def calculate_work(distances, forces):
works = []
work = 0
for i in range(len(forces)):
work = work + forces[i] * distances[i]
works.append(work)
return np.array(works)
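# Added note: calculate_work accumulates the discretized pulling work
# W_n = sum_i F_i * dx_i. When fed the averaged forces and per-step
# displacements computed above, this is a trapezoid-style estimate of
# W = integral F dx along the trajectory.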
def jarzynski_equality_simul_1(works, beta):
FEs = []
for i in range(len(works)):
FEs.append(- 1 / beta * np.log(np.exp(-beta*works[i])))
return np.array(FEs)
def jarzynski_equality_simul_2(works, beta):
Fexp = []
F1 = []
F2 = []
texp = 0
t1 = 0
t2 = 0
for i in range(len(works)):
w = works[i]
texp = texp + np.exp(-works[i] * beta)
t1 = t1 + works[i]
t2 = t2 + works[i] ** 2
Fexp.append(- 1/beta * np.log(texp / 10))
F1.append(t1 / 10)
F2.append(t1 / 10 - t2 / 10 + t1 * t1 /100)
Fexp = np.array(Fexp)
F1 = np.array(F1)
F2 = np.array(F2)
return [Fexp,F1,F2]
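# Background (added comment): both estimators above are based on the
# Jarzynski equality, which relates nonequilibrium pulling work W to the
# free-energy difference dF,
#
#     < exp(-beta * W) > = exp(-beta * dF)
#     =>  dF = -(1/beta) * ln < exp(-beta * W) >
#
# and on its second-order cumulant expansion
#     dF ~ <W> - (beta/2) * (<W^2> - <W>^2).
# Note that the running averages in jarzynski_equality_simul_2 divide by a
# hard-coded 10, i.e. they assume exactly 10 pulling trajectories.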
def column_wise_vec(data, col):
return np.array([row[col] for row in data])
# ========================================= Main Function ============================================= #
def main(argv):
input_file_name = ""
output_file_name = ""
kelvin = 300 # Default Value
velocity = 0.01
# ============== Getting input options from command line ============== #
try:
opts, args = getopt.getopt(argv, "hi:k:v:",["ifile=","kelvin=","velocity="])
except getopt.GetoptError:
print("Passing invalid options in command line!")
print("Usage: load_dat.py -i <ifile> -k <kelvin> -v <velocity>")
sys.exit(0)
for opt, arg in opts:
if opt == "-h":
print("Usage: load_dat.py -i <ifile> -k <kelvin> -v <velocity>")
sys.exit(1)
elif opt in ("-i", "--ifile"):
input_file_name = arg
output_file_name = re.split(r"([^\\]+)\.dat$",arg)[1] + "_out" + ".dat"
if os.path.exists(os.getcwd()+"/"+output_file_name):
os.system("rm %s"%os.getcwd()+"/"+output_file_name)
elif opt in ("-k","--kevlin"):
kelvin = float(arg)
elif opt in ("-v","--velocity"):
velocity = float(arg)
# ==================== Open the file to read ========================= #
try:
input_file = open(os.getcwd()+ "/" +input_file_name,"r")
except IOError:
print("Can't open the file %s: File not exist or broken!"%input_file_name)
sys.exit(2)
try:
output_file = open(os.getcwd()+ "/" + output_file_name,"w")
except IOError:
print("Can't open the file %s: File not exist or broken!"%out_file_name)
sys.exit(2)
# ==================== Parse input file and get value ================= #
all_lines = []
for line in input_file:
data_line = [float(data) for data in line.strip().split()]
all_lines.append(np.array(data_line))
data_lines = np.array(all_lines)
last_column = len(data_lines[0])
num_trails = len(data_lines[0]) // 2
# ==================== Calculate work for each traj =================== #
time_series = data_lines[:,0]
trajactories = [data_lines[:,col] for col in range(num_trails + 1) if col != 0]
forces = [data_lines[:,col] for col in range(num_trails + 1, last_column)]
# Works
all_works_1 = []
for i in range(num_trails):
delta_distances = get_delta_dis(trajactories[i])
ave_Fs = get_ave_forces(forces[i])
work_of_each_traj = calculate_work(delta_distances, ave_Fs)
all_works_1.append(work_of_each_traj)
all_works_1 = np.array(all_works_1)
# Method 1
all_FEs = []
for i in range(num_trails):
FEs = jarzynski_equality_simul_1(all_works_1[i], Beta(KB,kelvin))
all_FEs.append(FEs)
all_FEs = np.array(all_FEs)
# Method 2
all_FEs_2 = jarzynski_equality_simul_2(all_works_1[0], Beta(KB,kelvin))
ploting_vs = []
for i in range(len(trajactories[0])):
ploting_vs.append(np.array([trajactories[0][i], all_works_1[0][i]]))
ploting_vs.sort(key=lambda pair: pair[0])
ploting_vs = np.array(ploting_vs)
for i in range(len(all_works_1[0])):
row_string = "%.6f"%trajactories[0][i]
for j in range(num_trails):
this_value = "%.6f"%all_works_1[j][i]
row_string = row_string + " " + this_value.rjust(9)
row_string = row_string + "\n"
output_file.write(row_string)
# ====================== Plot the graphs ============================== #
#x = trajactories[0]
#y = all_works_1[0]
#print(x.min())
#print(x.max())
#x_smooth = np.linspace(x.min(),x.max(), 10)
#print(x_smooth)
#try:
# with Timeout(3):
# y_smooth = spline(x, y, x_smooth)
#except Timeout.Timeout:
# print("Timeout!")
# input_file.close()
# sys.exit(3)
'''x = trajactories[0]
y = all_works_1[0]
sort_idx = np.argsort(x)
x_sorted = x[sort_idx]
y_sorted = y[sort_idx]
order = 2
spline = Bspline(x_sorted, y_sorted, order)
y_smooth = spline(x_smooth)'''
plotting_vs = []
for i in range(len(trajactories[0])):
plotting_vs.append(np.array([trajactories[0][i], all_works_1[0][i]]))
plotting_vs.sort(key=lambda pair: pair[0])
plotting_vs = np.array(plotting_vs)
plt.figure(1)
plt.plot(plotting_vs[:, 0], plotting_vs[:, 1], "k")
plt.show()
plt.figure(2)
plt.plot(time_series, trajactories[0], "r")
plt.show()
plt.close()
plt.figure(3)
plt.plot(time_series, forces[0], "g")
plt.show()
plt.close()
plt.figure(4)
plt.plot(trajactories[0], forces[0], "y")
plt.show()
plt.close()
plotting_vs = []
for i in range(len(trajactories[0])):
plotting_vs.append(np.array([trajactories[0][i], all_FEs[0][i]]))
plotting_vs.sort(key=lambda pair: pair[0])
plotting_vs = np.array(plotting_vs)
plt.figure(5)
plt.plot(plotting_vs[:, 0], plotting_vs[:, 1], "b")
plt.show()
plt.close()
plt.figure(6)
plt.plot(trajactories[0],all_FEs_2[0],"b")
plt.show()
plt.close()
# ====================== Final and clean up =========================== #
input_file.close()
output_file.close()
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
alexeyum/scikit-learn | sklearn/decomposition/dict_learning.py | 42 | 46134 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000, check_input=True, verbose=0):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose: int
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=verbose, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
# TODO: Make verbosity argument for Lasso?
# sklearn.linear_model.coordinate_descent.enet_path has a verbosity
# argument that we could pass in from Lasso.
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=check_input)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
# Not passing in verbose=max(0, verbose-1) because Lars.fit already
# corrects the verbosity level.
lars = Lars(fit_intercept=False, verbose=verbose, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
# TODO: Should verbose argument be passed to this?
new_code = orthogonal_mp_gram(
Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization),
tol=None, norms_squared=row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars" '
'"lasso_cd", "lasso", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1, check_input=True, verbose=0):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
check_input: boolean, optional
If False, the input arrays X and dictionary will not be checked.
verbose : int, optional
Controls the verbosity; the higher, the more messages. Defaults to 0.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if check_input:
if algorithm == 'lasso_cd':
dictionary = check_array(dictionary, order='C', dtype='float64')
X = check_array(X, order='C', dtype='float64')
else:
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter,
check_input=False,
verbose=verbose)
# This ensures that the dimensionality of code is always 2,
# consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter,
check_input=False)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
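# Illustrative usage of sparse_encode (added comment, not part of the library
# docs); with an orthonormal dictionary and the 'threshold' algorithm the code
# is simply a soft-thresholded projection:
#
#   >>> import numpy as np
#   >>> D = np.eye(3)                        # trivial 3-atom dictionary
#   >>> X = np.array([[1.0, 0.2, -0.5]])
#   >>> sparse_encode(X, D, algorithm='threshold', alpha=0.3)
#   array([[ 0.7,  0. , -0.2]])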
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
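# Implementation note (added comment): the loop above is a block coordinate
# descent over atoms. For each atom k the rank-1 BLAS updates
#     R <- R + d_k c_k^T         (add atom k's contribution back)
#     d_k <- R c_k, then d_k <- d_k / ||d_k||   (refit and renormalize atom k)
#     R <- R - d_k c_k^T         (remove the updated contribution)
# keep the residual R = Y - D C up to date without recomputing the full
# product for every atom, which is what the two `ger` calls implement.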
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
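# Minimal usage sketch for dict_learning (added comment; array sizes and
# parameter values are illustrative only):
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(30, 8)
#   >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
#   ...                                          random_state=rng)
#   >>> code.shape, dictionary.shape
#   ((30, 5), (5, 8))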
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criterion based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs, check_input=False)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
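# Implementation note (added comment): following Mairal et al. (2009), the
# online loop keeps the sufficient statistics
#     A = sum_t c_t c_t^T    (code covariance,    n_components x n_components)
#     B = sum_t x_t c_t^T    (data/code product,  n_features   x n_components)
# rescaled at each step by the forgetting factor
#     beta = (theta + 1 - batch_size) / (theta + 1),
# so that _update_dict(dictionary, B, A) refits the atoms against all batches
# seen so far without having to store them.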
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
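# Minimal usage sketch for SparseCoder (added comment; values are
# illustrative only):
#
#   >>> import numpy as np
#   >>> D = np.eye(4)                               # fixed dictionary
#   >>> coder = SparseCoder(D, transform_algorithm='threshold',
#   ...                     transform_alpha=0.1)
#   >>> coder.transform(np.array([[0.5, 0.0, 0.0, -0.3]])).shape
#   (1, 4)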
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
.. versionadded:: 0.17
*cd* coordinate descent method to improve speed.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
.. versionadded:: 0.17
*lasso_cd* coordinate descent method to improve speed.
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
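# A minimal usage sketch (kept as comments so the module itself is unchanged);
# the shapes and parameter values below are illustrative, not prescriptive:
#
#   import numpy as np
#   from sklearn.decomposition import DictionaryLearning
#
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 30)                       # 100 samples, 30 features
#   dico = DictionaryLearning(n_components=15, alpha=1, max_iter=50,
#                             transform_algorithm='omp', random_state=0)
#   code = dico.fit(X).transform(X)              # sparse codes, shape (100, 15)
#   # dico.components_ holds the learned atoms, shape (15, 30)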
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose : bool, optional
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
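# A hedged sketch of online fitting with partial_fit (comments only; the batch
# size, shapes and number of batches are illustrative):
#
#   import numpy as np
#   from sklearn.decomposition import MiniBatchDictionaryLearning
#
#   rng = np.random.RandomState(0)
#   dico = MiniBatchDictionaryLearning(n_components=10, alpha=1, n_iter=50,
#                                      random_state=0)
#   for _ in range(20):                          # stream of mini-batches
#       batch = rng.randn(64, 30)
#       dico.partial_fit(batch)                  # updates components_ incrementally
#   # dico.components_ has shape (10, 30); dico.iter_offset_ tracks progress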
| bsd-3-clause |
warmspringwinds/scikit-image | doc/examples/plot_watershed.py | 4 | 2335 | """
======================
Watershed segmentation
======================
The watershed is a classical algorithm used for **segmentation**, that
is, for separating different objects in an image.
Starting from user-defined markers, the watershed algorithm treats
pixel values as a local topography (elevation). The algorithm floods
basins from the markers, until basins attributed to different markers
meet on watershed lines. In many cases, markers are chosen as local
minima of the image, from which basins are flooded.
In the example below, two overlapping circles are to be separated. To
do so, one computes an image that is the distance to the
background. The maxima of this distance (i.e., the minima of the
opposite of the distance) are chosen as markers, and the flooding of
basins from such markers separates the two circles along a watershed
line.
See Wikipedia_ for more details on the algorithm.
.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage.morphology import watershed
from skimage.feature import peak_local_max
# Generate an initial image with two overlapping circles
x, y = np.indices((80, 80))
x1, y1, x2, y2 = 28, 28, 44, 52
r1, r2 = 16, 20
mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
image = np.logical_or(mask_circle1, mask_circle2)
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndimage.distance_transform_edt(image)
local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)),
labels=image)
markers = ndimage.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=image)
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.7))
ax0, ax1, ax2 = axes
ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax0.set_title('Overlapping objects')
ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
ax1.set_title('Distances')
ax2.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
ax2.set_title('Separated objects')
for ax in axes:
ax.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | examples/release_highlights/plot_release_highlights_0_22_0.py | 10 | 10186 | """
========================================
Release Highlights for scikit-learn 0.22
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.22, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <changes_0_22>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# New plotting API
# ----------------
#
# A new plotting API is available for creating visualizations. This new API
# allows for quickly adjusting the visuals of a plot without involving any
# recomputation. It is also possible to add different plots to the same
# figure. The following example illustrates :class:`~metrics.plot_roc_curve`,
# but other plot utilities are supported, like
# :class:`~inspection.plot_partial_dependence`,
# :class:`~metrics.plot_precision_recall_curve`, and
# :class:`~metrics.plot_confusion_matrix`. Read more about this new API in the
# :ref:`User Guide <visualizations>`.
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import plot_roc_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import matplotlib.pyplot as plt
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
svc = SVC(random_state=42)
svc.fit(X_train, y_train)
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X_train, y_train)
svc_disp = plot_roc_curve(svc, X_test, y_test)
rfc_disp = plot_roc_curve(rfc, X_test, y_test, ax=svc_disp.ax_)
rfc_disp.figure_.suptitle("ROC curve comparison")
plt.show()
# %%
# Stacking Classifier and Regressor
# ---------------------------------
# :class:`~ensemble.StackingClassifier` and
# :class:`~ensemble.StackingRegressor`
# allow you to have a stack of estimators with a final classifier or
# a regressor.
# Stacked generalization consists in stacking the output of individual
# estimators and using a classifier to compute the final prediction. Stacking
# makes it possible to use the strength of each individual estimator by using
# their output as input to a final estimator.
# Base estimators are fitted on the full ``X`` while
# the final estimator is trained using cross-validated predictions of the
# base estimators using ``cross_val_predict``.
#
# Read more in the :ref:`User Guide <stacking>`.
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True)
estimators = [
('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
('svr', make_pipeline(StandardScaler(),
LinearSVC(random_state=42)))
]
clf = StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression()
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=42
)
clf.fit(X_train, y_train).score(X_test, y_test)
# %%
# Permutation-based feature importance
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
feature_names = np.array([f'x_{i}' for i in range(X.shape[1])])
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0,
n_jobs=-1)
fig, ax = plt.subplots()
sorted_idx = result.importances_mean.argsort()
ax.boxplot(result.importances[sorted_idx].T,
vert=False, labels=feature_names[sorted_idx])
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Native support for missing values for gradient boosting
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
X = np.array([0, 1, 2, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
gbdt = HistGradientBoostingClassifier(min_samples_leaf=1).fit(X, y)
print(gbdt.predict(X))
# %%
# Precomputed sparse nearest neighbors graph
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
from sklearn.neighbors import KNeighborsTransformer
from sklearn.manifold import Isomap
from sklearn.pipeline import make_pipeline
X, y = make_classification(random_state=0)
with TemporaryDirectory(prefix="sklearn_cache_") as tmpdir:
estimator = make_pipeline(
KNeighborsTransformer(n_neighbors=10, mode='distance'),
Isomap(n_neighbors=10, metric='precomputed'),
memory=tmpdir)
estimator.fit(X)
# We can decrease the number of neighbors and the graph will not be
# recomputed.
estimator.set_params(isomap__n_neighbors=5)
estimator.fit(X)
# %%
# KNN Based Imputation
# ------------------------------------
# We now support imputation for completing missing values using k-Nearest
# Neighbors.
#
# Each sample's missing values are imputed using the mean value from
# ``n_neighbors`` nearest neighbors found in the training set. Two samples are
# close if the features that neither is missing are close.
# By default, a euclidean distance metric
# that supports missing values,
# :func:`~metrics.nan_euclidean_distances`, is used to find the nearest
# neighbors.
#
# Read more in the :ref:`User Guide <knnimpute>`.
from sklearn.impute import KNNImputer
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
# %%
# Tree pruning
# ------------
#
# It is now possible to prune most tree-based estimators once the trees are
# built. The pruning is based on minimal cost-complexity. Read more in the
# :ref:`User Guide <minimal_cost_complexity_pruning>` for details.
X, y = make_classification(random_state=0)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0).fit(X, y)
print("Average number of nodes without pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
rf = RandomForestClassifier(random_state=0, ccp_alpha=0.05).fit(X, y)
print("Average number of nodes with pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])))
# %%
# Retrieve dataframes from OpenML
# -------------------------------
# :func:`datasets.fetch_openml` can now return pandas dataframe and thus
# properly handle datasets with heterogeneous data:
from sklearn.datasets import fetch_openml
titanic = fetch_openml('titanic', version=1, as_frame=True)
print(titanic.data.head()[['pclass', 'embarked']])
# %%
# Checking scikit-learn compatibility of an estimator
# ---------------------------------------------------
# Developers can check the compatibility of their scikit-learn compatible
# estimators using :func:`~utils.estimator_checks.check_estimator`. For
# instance, the ``check_estimator(LinearSVC())`` passes.
#
# We now provide a ``pytest`` specific decorator which allows ``pytest``
# to run all checks independently and report the checks that are failing.
#
# .. note::
# This entry was slightly updated in version 0.24, where passing classes
# isn't supported anymore: pass instances instead.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import parametrize_with_checks
@parametrize_with_checks([LogisticRegression(), DecisionTreeRegressor()])
def test_sklearn_compatible_estimator(estimator, check):
check(estimator)
# %%
# ROC AUC now supports multiclass classification
# ----------------------------------------------
# The :func:`roc_auc_score` function can also be used in multi-class
# classification. Two averaging strategies are currently supported: the
# one-vs-one algorithm computes the average of the pairwise ROC AUC scores, and
# the one-vs-rest algorithm computes the average of the ROC AUC scores for each
# class against all other classes. In both cases, the multiclass ROC AUC scores
# are computed from the probability estimates that a sample belongs to a
# particular class according to the model. The OvO and OvR algorithms support
# weighting uniformly (``average='macro'``) and weighting by the prevalence
# (``average='weighted'``).
#
# Read more in the :ref:`User Guide <roc_metrics>`.
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
X, y = make_classification(n_classes=4, n_informative=16)
clf = SVC(decision_function_shape='ovo', probability=True).fit(X, y)
print(roc_auc_score(y, clf.predict_proba(X), multi_class='ovo'))
| bsd-3-clause |
zorroblue/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 127 | 1270 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
wogsland/QSTK | Bin/Data_CSV.py | 5 | 3301 | #File to read the data from mysql and push into CSV.
# Python imports
import datetime as dt
import csv
import copy
import os
import pickle
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# QSTK imports
from QSTK.qstkutil import qsdateutil as du
import QSTK.qstkutil.DataEvolved as de
def get_data(ls_symbols, ls_keys):
'''
@summary: Gets a data chunk for backtesting
@param dt_start: Start time
@param dt_end: End time
@param ls_symbols: symbols to use
@note: More data will be pulled from before and after the limits to ensure
valid data on the start/end dates, which requires lookback/forward
@return: data dictionary
'''
print "Getting Data from MySQL"
# Modify dates to ensure enough data for all features
dt_start = dt.datetime(2005,1,1)
dt_end = dt.datetime(2012, 8, 31)
ldt_timestamps = du.getNYSEdays( dt_start, dt_end, dt.timedelta(hours=16) )
c_da = de.DataAccess('mysql')
ldf_data = c_da.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
return d_data
def read_symbols(s_symbols_file):
ls_symbols=[]
file = open(s_symbols_file, 'r')
for f in file.readlines():
j = f[:-1]
ls_symbols.append(j)
file.close()
return ls_symbols
def csv_sym(sym, d_data, ls_keys, s_directory):
bool_first_iter = True
for key in ls_keys:
if bool_first_iter == True:
df_sym = d_data[key].reindex(columns = [sym])
df_sym = df_sym.rename(columns = {sym : key})
bool_first_iter = False
else:
df_temp = d_data[key].reindex(columns = [sym])
df_temp = df_temp.rename(columns = {sym : key})
df_sym = df_sym.join(df_temp, how= 'outer')
symfilename = sym.split('-')[0]
sym_file = open(s_directory + symfilename + '.csv', 'w')
sym_file.write("Date,Open,High,Low,Close,Volume,Adj Close \n")
ldt_timestamps = list(df_sym.index)
ldt_timestamps.reverse()
for date in ldt_timestamps:
date_to_csv = '{:%Y-%m-%d}'.format(date)
string_to_csv = date_to_csv
for key in ls_keys:
string_to_csv = string_to_csv + ',' + str(df_sym[key][date])
string_to_csv = string_to_csv + '\n'
sym_file.write(string_to_csv)
def main(s_directory, s_symbols_file):
#ls_symbols = read_symbols(s_symbols_file)
ls_symbols = ['ACS-201002','BDK-201003','BJS-201004','BSC-201108','CCT-201111','EQ-200907','JAVA-201002','NCC-200901','NOVL-201104','PBG-201003','PTV-201011','ROH-200904','SGP-200911','SII-201008','WB-200901','WYE-200910','XTO-201006']
ls_keys = ['actual_open', 'actual_high', 'actual_low', 'actual_close', 'volume', 'close']
d_data = get_data(ls_symbols, ls_keys)
# print d_data
print "Creating CSV files now"
for sym in ls_symbols:
print sym
csv_sym(sym,d_data, ls_keys, s_directory)
print "Created all CSV files"
if __name__ == '__main__' :
s_directory = 'MLTData/'
s_directory = os.environ['QSDATA'] + '/Yahoo/'
s_symbols_file1 = 'MLTData/sp5002012.txt'
s_symbols_file2 = 'MLTData/index.txt'
s_symbols_file3 = 'MLTData/sp5002008.txt'
main(s_directory, s_symbols_file3) | bsd-3-clause |
arokem/nipy | nipy/labs/viz_tools/maps_3d.py | 3 | 14510 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
3D visualization of activation maps using Mayavi
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD
import os
import tempfile
# Standard scientific libraries imports (more specific imports are
# delayed, so that the part module can be used without them).
import numpy as np
from scipy import stats
# Local imports
from .anat_cache import mni_sform, mni_sform_inv, _AnatCache
from .coord_tools import coord_transform
# A module global to avoid creating multiple time an offscreen engine.
off_screen_engine = None
################################################################################
# Helper functions
def affine_img_src(data, affine, scale=1, name='AffineImage',
reverse_x=False):
""" Make a Mayavi source defined by a 3D array and an affine, for
which the voxels of the 3D array are mapped by the affine.
Parameters
-----------
data: 3D ndarray
The data arrays
affine: (4 x 4) ndarray
The (4 x 4) affine matrix relating voxels to world
coordinates.
scale: float, optional
An optional additional scaling factor.
name: string, optional
The name of the Mayavi source created.
reverse_x: boolean, optional
Reverse the x (lateral) axis. Useful to compared with
images in radiologic convention.
Notes
------
The affine should be diagonal.
"""
# Late import to avoid triggering wx imports before needed.
try:
from mayavi.sources.api import ArraySource
except ImportError:
# Try out old install of Mayavi, with namespace packages
from enthought.mayavi.sources.api import ArraySource
center = np.r_[0, 0, 0, 1]
spacing = np.diag(affine)[:3]
origin = np.dot(affine, center)[:3]
if reverse_x:
# Radiologic convention
spacing[0] *= -1
origin[0] *= -1
src = ArraySource(scalar_data=np.asarray(data, dtype=np.float),
name=name,
spacing=scale*spacing,
origin=scale*origin)
return src
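# A small illustration (comments only) of the diagonal-affine assumption noted
# above: the voxel spacing and world-space origin used by affine_img_src come
# straight from the affine. The numbers are made up:
#
#   import numpy as np
#   affine = np.diag([2., 2., 2., 1.])           # 2 mm isotropic voxels
#   affine[:3, 3] = (-90., -126., -72.)          # world position of voxel (0, 0, 0)
#   spacing = np.diag(affine)[:3]                # -> [ 2.,  2.,  2.]
#   origin = np.dot(affine, [0, 0, 0, 1])[:3]    # -> [-90., -126., -72.]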
################################################################################
# Mayavi helpers
def autocrop_img(img, bg_color):
red, green, blue = bg_color
outline = ( (img[..., 0] != red)
+(img[..., 1] != green)
+(img[..., 2] != blue)
)
outline_x = outline.sum(axis=0)
outline_y = outline.sum(axis=1)
outline_x = np.where(outline_x)[0]
outline_y = np.where(outline_y)[0]
if len(outline_x) == 0:
return img
else:
x_min = outline_x.min()
x_max = outline_x.max()
if len(outline_y) == 0:
return img
else:
y_min = outline_y.min()
y_max = outline_y.max()
return img[y_min:y_max, x_min:x_max]
def m2screenshot(mayavi_fig=None, mpl_axes=None, autocrop=True):
""" Capture a screeshot of the Mayavi figure and display it in the
matplotlib axes.
"""
import pylab as pl
# Late import to avoid triggering wx imports before needed.
try:
from mayavi import mlab
except ImportError:
# Try out old install of Mayavi, with namespace packages
from enthought.mayavi import mlab
if mayavi_fig is None:
mayavi_fig = mlab.gcf()
else:
mlab.figure(mayavi_fig)
if mpl_axes is not None:
pl.axes(mpl_axes)
filename = tempfile.mktemp('.png')
mlab.savefig(filename, figure=mayavi_fig)
image3d = pl.imread(filename)
if autocrop:
bg_color = mayavi_fig.scene.background
image3d = autocrop_img(image3d, bg_color)
pl.imshow(image3d)
pl.axis('off')
os.unlink(filename)
# XXX: Should switch back to previous MPL axes: we have a side effect
# here.
################################################################################
# Anatomy outline
################################################################################
def plot_anat_3d(anat=None, anat_affine=None, scale=1,
sulci_opacity=0.5, gyri_opacity=0.3,
opacity=None,
skull_percentile=78, wm_percentile=79,
outline_color=None):
""" 3D anatomical display
Parameters
----------
skull_percentile : float, optional
The percentile of the values in the image that delimit the skull from
the outside of the brain. The smaller the fraction of your field of view
that is occupied by the brain, the larger this value should be.
wm_percentile : float, optional
The percentile of the values in the image that delimit the white matter
from the grey matter. Typically this is skull_percentile + 1.
"""
# Late import to avoid triggering wx imports before needed.
try:
from mayavi import mlab
except ImportError:
# Try out old install of Mayavi, with namespace packages
from enthought.mayavi import mlab
fig = mlab.gcf()
disable_render = fig.scene.disable_render
fig.scene.disable_render = True
if anat is None:
anat, anat_affine, anat_max = _AnatCache.get_anat()
anat_blurred = _AnatCache.get_blurred()
skull_threshold = 4800
inner_threshold = 5000
upper_threshold = 7227.8
else:
from scipy import ndimage
# XXX: This should be in a separate function
voxel_size = np.sqrt((anat_affine[:3, :3]**2).sum()/3.)
skull_threshold = stats.scoreatpercentile(anat.ravel(),
skull_percentile)
inner_threshold = stats.scoreatpercentile(anat.ravel(),
wm_percentile)
upper_threshold = anat.max()
anat_blurred = ndimage.gaussian_filter(
(ndimage.morphology.binary_fill_holes(
ndimage.gaussian_filter(
(anat > skull_threshold).astype(np.float),
6./voxel_size)
> 0.5
)).astype(np.float),
2./voxel_size).T.ravel()
if opacity is None:
try:
from tvtk.api import tvtk
except ImportError:
# Try out old install of Mayavi, with namespace packages
from enthought.tvtk.api import tvtk
version = tvtk.Version()
if (version.vtk_major_version, version.vtk_minor_version) < (5, 2):
opacity = .99
else:
opacity = 1
###########################################################################
# Display the cortical surface (flattenned)
anat_src = affine_img_src(anat, anat_affine, scale=scale, name='Anat')
anat_src.image_data.point_data.add_array(anat_blurred)
anat_src.image_data.point_data.get_array(1).name = 'blurred'
anat_src.image_data.point_data.update()
anat_blurred = mlab.pipeline.set_active_attribute(
anat_src, point_scalars='blurred')
anat_blurred.update_pipeline()
# anat_blurred = anat_src
cortex_surf = mlab.pipeline.set_active_attribute(
mlab.pipeline.contour(anat_blurred),
point_scalars='scalar')
# XXX: the choice in vmin and vmax should be tuned to show the
# sulci better
cortex = mlab.pipeline.surface(cortex_surf,
colormap='copper',
opacity=opacity,
vmin=skull_threshold,
vmax=inner_threshold)
cortex.enable_contours = True
cortex.contour.filled_contours = True
cortex.contour.auto_contours = False
cortex.contour.contours = [0, inner_threshold, upper_threshold]
#cortex.actor.property.backface_culling = True
# XXX: Why do we do 'frontface_culling' to see the front.
cortex.actor.property.frontface_culling = True
cortex.actor.mapper.interpolate_scalars_before_mapping = True
cortex.actor.property.interpolation = 'flat'
# Add opacity variation to the colormap
cmap = cortex.module_manager.scalar_lut_manager.lut.table.to_array()
cmap[128:, -1] = gyri_opacity*255
cmap[:128, -1] = sulci_opacity*255
cortex.module_manager.scalar_lut_manager.lut.table = cmap
if outline_color is not None:
outline = mlab.pipeline.iso_surface(
anat_blurred,
contours=[0.4],
color=outline_color,
opacity=.9)
outline.actor.property.backface_culling = True
fig.scene.disable_render = disable_render
return cortex
################################################################################
# Maps
################################################################################
def plot_map_3d(map, affine, cut_coords=None, anat=None, anat_affine=None,
threshold=None, offscreen=False, vmin=None, vmax=None, cmap=None,
view=(38.5, 70.5, 300, (-2.7, -12, 9.1)),
):
""" Plot a 3D volume rendering view of the activation, with an
outline of the brain.
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
affine : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of a 3D cursor to indicate a feature
or a cut, in MNI coordinates and order.
anat : 3D ndarray, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used. If False, no anatomical
image is used.
anat_affine : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
explicit anatomical image.
threshold : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
offscreen: boolean, optional
If True, Mayavi attempts to plot offscreen. Will work only
with VTK >= 5.2.
vmin : float, optional
The minimal value, for the colormap
vmax : float, optional
The maximum value, for the colormap
cmap : a callable, or a pylab colormap
A callable returning a (n, 4) array for n values between
0 and 1 for the colors. This can be for instance a pylab
colormap.
Notes
-----
If you are using a VTK version below 5.2, there is no way to
avoid opening a window during the rendering under Linux. This is
necessary to use the graphics card for the rendering. You must
maintain this window on top of others and on the screen.
"""
# Late import to avoid triggering wx imports before needed.
try:
from mayavi import mlab
except ImportError:
# Try out old install of Mayavi, with namespace packages
from enthought.mayavi import mlab
if offscreen:
global off_screen_engine
if off_screen_engine is None:
try:
from mayavi.core.off_screen_engine import OffScreenEngine
except ImportError:
# Try out old install of Mayavi, with namespace packages
from enthought.mayavi.core.off_screen_engine import OffScreenEngine
off_screen_engine = OffScreenEngine()
off_screen_engine.start()
fig = mlab.figure('__private_plot_map_3d__',
bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),
size=(400, 330),
engine=off_screen_engine)
mlab.clf(figure=fig)
else:
fig = mlab.gcf()
fig = mlab.figure(fig, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0),
size=(400, 350))
disable_render = fig.scene.disable_render
fig.scene.disable_render = True
if threshold is None:
threshold = stats.scoreatpercentile(
np.abs(map).ravel(), 80)
contours = []
lower_map = map[map <= -threshold]
if np.any(lower_map):
contours.append(lower_map.max())
upper_map = map[map >= threshold]
if np.any(upper_map):
contours.append(map[map > threshold].min())
###########################################################################
# Display the map using iso-surfaces
if len(contours) > 0:
map_src = affine_img_src(map, affine)
module = mlab.pipeline.iso_surface(map_src,
contours=contours,
vmin=vmin, vmax=vmax)
if hasattr(cmap, '__call__'):
# Stick the colormap in mayavi
module.module_manager.scalar_lut_manager.lut.table \
= (255*cmap(np.linspace(0, 1, 256))).astype(np.int)
else:
module = None
if not anat is False:
plot_anat_3d(anat=anat, anat_affine=anat_affine, scale=1.05,
outline_color=(.9, .9, .9),
gyri_opacity=.2)
###########################################################################
# Draw the cursor
if cut_coords is not None:
x0, y0, z0 = cut_coords
mlab.plot3d((-90, 90), (y0, y0), (z0, z0),
color=(.5, .5, .5), tube_radius=0.25)
mlab.plot3d((x0, x0), (-126, 91), (z0, z0),
color=(.5, .5, .5), tube_radius=0.25)
mlab.plot3d((x0, x0), (y0, y0), (-72, 109),
color=(.5, .5, .5), tube_radius=0.25)
mlab.view(*view)
fig.scene.disable_render = disable_render
return module
def demo_plot_map_3d():
map = np.zeros((182, 218, 182))
# Color an asymmetric rectangle around Broca's area:
x, y, z = -52, 10, 22
x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
map[x_map-5:x_map+5, y_map-3:y_map+3, z_map-10:z_map+10] = 1
plot_map_3d(map, mni_sform, cut_coords=(x, y, z))
| bsd-3-clause |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 30 | 3738 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(tf.test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with tf.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(tf.test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(dataframe,
batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with tf.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| mit |
nok/sklearn-porter | sklearn_porter/estimator/classifier/AdaBoostClassifier/__init__.py | 1 | 12399 | # -*- coding: utf-8 -*-
import os
from json import encoder
from json import dumps
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn_porter.estimator.classifier.Classifier import Classifier
class AdaBoostClassifier(Classifier):
"""
See also
--------
sklearn.ensemble.AdaBoostClassifier
http://scikit-learn.org/stable/modules/generated/
sklearn.ensemble.AdaBoostClassifier.html
"""
SUPPORTED_METHODS = ['predict']
# @formatter:off
TEMPLATES = {
'c': {
'if': 'if (features[{0}] {1} {2}) {{',
'else': '} else {',
'endif': '}',
'arr': 'classes[{0}] = {1}',
'indent': ' ',
'join': '; ',
},
'java': {
'if': 'if (features[{0}] {1} {2}) {{',
'else': '} else {',
'endif': '}',
'arr': 'classes[{0}] = {1}',
'indent': ' ',
'join': '; ',
},
'js': {
'if': 'if (features[{0}] {1} {2}) {{',
'else': '} else {',
'endif': '}',
'arr': 'classes[{0}] = {1}',
'indent': ' ',
'join': '; ',
}
}
# @formatter:on
def __init__(self, estimator, target_language='java',
target_method='predict', **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param estimator : AdaBoostClassifier
An instance of a trained AdaBoostClassifier estimator.
:param target_language : string, default: 'java'
The target programming language.
:param target_method : string, default: 'predict'
The target method of the estimator.
"""
super(AdaBoostClassifier, self).__init__(
estimator, target_language=target_language,
target_method=target_method, **kwargs)
# Check the used algorithm type:
if estimator.algorithm != 'SAMME.R':
msg = "The classifier doesn't support the given algorithm %s."
raise ValueError(msg, estimator.algorithm)
# Check type of base estimators:
if not isinstance(estimator.base_estimator, DecisionTreeClassifier):
msg = "The classifier doesn't support the given base estimator %s."
raise ValueError(msg, estimator.base_estimator)
# Check number of base estimators:
if not estimator.n_estimators > 0:
msg = "The classifier hasn't any base estimators."
raise ValueError(msg)
self.estimator = estimator
def export(self, class_name, method_name,
export_data=False, export_dir='.', export_filename='data.json',
export_append_checksum=False, embed_data=True, **kwargs):
"""
Port a trained estimator to the syntax of a chosen programming
language.
Parameters
----------
:param class_name : string
The name of the class in the returned result.
:param method_name: string
The name of the method in the returned result.
:param export_data : bool
Whether the model data should be saved or not.
:param export_dir : string
The directory where the model data should be saved.
:param export_filename : string
The filename of the exported model data.
:param export_append_checksum : bool
Whether to append the checksum to the filename or not.
:param embed_data : bool, default: True
Whether the model data should be embedded in the template or not.
Returns
-------
:return : string
The transpiled algorithm with the defined placeholders.
"""
# Arguments:
self.class_name = class_name
self.method_name = method_name
# Estimator:
est = self.estimator
# Basic parameters:
self.estimators = []
for idx in range(est.n_estimators):
if est.estimator_weights_[idx] > 0:
self.estimators.append(est.estimators_[idx])
self.n_classes = est.n_classes_
self.n_features = est.estimators_[0].n_features_
self.n_estimators = len(self.estimators)
if self.target_method == 'predict':
# Exported:
if export_data and os.path.isdir(export_dir):
self.export_data(export_dir, export_filename,
export_append_checksum)
return self.predict('exported')
# Embedded:
return self.predict('embedded')
def predict(self, temp_type):
"""
Transpile the predict method.
Parameters
----------
:param temp_type : string
The kind of export type (embedded, separated, exported).
Returns
-------
:return : string
The transpiled predict method as string.
"""
# Exported:
if temp_type == 'exported':
temp = self.temp('exported.class')
return temp.format(class_name=self.class_name,
method_name=self.method_name,
n_features=self.n_features)
# Embedded:
if temp_type == 'embedded':
meth = self.create_embedded_meth()
return self.create_embedded_class(meth)
def export_data(self, directory, filename, with_md5_hash=False):
"""
Save model data in a JSON file.
Parameters
----------
:param directory : string
The directory.
:param filename : string
The filename.
:param with_md5_hash : bool
Whether to append the checksum to the filename or not.
"""
model_data = []
for est in self.estimators:
model_data.append({
'childrenLeft': est.tree_.children_left.tolist(),
'childrenRight': est.tree_.children_right.tolist(),
'thresholds': est.tree_.threshold.tolist(),
'classes': [e[0] for e in est.tree_.value.tolist()],
'indices': est.tree_.feature.tolist()
})
encoder.FLOAT_REPR = lambda o: self.repr(o)
json_data = dumps(model_data, sort_keys=True)
if with_md5_hash:
import hashlib
json_hash = hashlib.md5(json_data).hexdigest()
filename = filename.split('.json')[0] + '_' + json_hash + '.json'
path = os.path.join(directory, filename)
with open(path, 'w') as file_:
file_.write(json_data)
def create_branches(self, left_nodes, right_nodes, threshold,
value, features, node, depth, init=False):
"""
Parse and port a single tree estimator.
Parameters
----------
:param left_nodes : object
The left children node.
:param right_nodes : object
The left children node.
:param threshold : object
The decision threshold.
:param value : object
The label or class.
:param features : object
The feature values.
:param node : int
The current node.
:param depth : int
The tree depth.
:param init : bool, default: False
Whether it's the initial tree or not.
Returns
-------
:return out : string
The ported single tree as function or method.
"""
out = ''
if threshold[node] != -2.:
if not init:
out += '\n'
temp = self.temp('if', n_indents=depth, skipping=init)
out += temp.format(features[node], '<=', self.repr(threshold[node]))
if left_nodes[node] != -1.:
out += self.create_branches(
left_nodes, right_nodes, threshold, value,
features, left_nodes[node], depth + 1)
out += '\n'
out += self.temp('else', n_indents=depth)
if right_nodes[node] != -1.:
out += self.create_branches(
left_nodes, right_nodes, threshold, value,
features, right_nodes[node], depth + 1)
out += '\n'
out += self.temp('endif', n_indents=depth)
else:
clazzes = []
temp = self.temp('arr', n_indents=depth)
for i, val in enumerate(value[node][0]):
clazz = temp.format(i, self.repr(val))
clazz = '\n' + clazz
clazzes.append(clazz)
out += self.temp('join').join(clazzes) + self.temp('join')
return out
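# For intuition (illustrative only, not actual generated output): with the
# Java templates defined above, a single split followed by a leaf is emitted
# roughly as
#
#   if (features[2] <= 2.45) {
#       classes[0] = 50.0;
#       classes[1] = 0.0;
#   } else {
#       ...
#   }
#
# i.e. one nested if/else per internal tree node and one classes[] assignment
# per class at each leaf.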
def create_single_method(self, estimator_index, estimator):
"""
Port a method for a single tree.
Parameters
----------
:param estimator_index : int
The estimator index.
:param estimator : AdaBoostClassifier
The estimator itself.
Returns
-------
:return : string
The created method as string.
"""
feature_indices = []
for i in estimator.tree_.feature:
n_features = estimator.n_features_
if self.n_features > 1 or (self.n_features == 1 and i >= 0):
feature_indices.append([str(j) for j in range(n_features)][i])
tree_branches = self.create_branches(
estimator.tree_.children_left, estimator.tree_.children_right,
estimator.tree_.threshold, estimator.tree_.value,
feature_indices, 0, 1, init=True)
temp_tree = self.temp('embedded.tree')
return temp_tree.format(method_name=self.method_name,
method_index=str(estimator_index),
methods=tree_branches,
n_classes=self.n_classes)
def create_embedded_meth(self):
"""
Build the estimator methods or functions.
Returns
-------
:return : string
The built methods as merged string.
"""
# Generate related trees:
fns = []
for idx, estimator in enumerate(self.estimators):
tree = self.create_single_method(idx, estimator)
fns.append(tree)
fns = '\n'.join(fns)
# Generate method or function names:
fn_names = ''
if self.target_language in ['c', 'java']:
fn_names = []
temp_method_calls = self.temp('embedded.method_calls', n_indents=2,
skipping=True)
for idx, estimator in enumerate(self.estimators):
cl_name = self.class_name
fn_name = self.method_name + '_' + str(idx)
fn_name = temp_method_calls.format(class_name=cl_name,
method_name=fn_name,
method_index=idx)
fn_names.append(fn_name)
fn_names = '\n'.join(fn_names)
fn_names = self.indent(fn_names, n_indents=1, skipping=True)
# Merge generated content:
n_indents = 1 if self.target_language in ['java', 'js'] else 0
temp_method = self.temp('embedded.method')
method = temp_method.format(method_name=self.method_name,
class_name=self.class_name,
method_calls=fn_names, methods=fns,
n_estimators=self.n_estimators,
n_classes=self.n_classes)
return self.indent(method, n_indents=n_indents, skipping=True)
def create_embedded_class(self, method):
"""
Build the estimator class.
Returns
-------
:return : string
The built class as string.
"""
temp_class = self.temp('embedded.class')
return temp_class.format(class_name=self.class_name,
method_name=self.method_name, method=method,
n_features=self.n_features)
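# A hedged usage sketch for this porter (comments only). It assumes a trained
# scikit-learn AdaBoostClassifier with the supported settings (SAMME.R and
# DecisionTreeClassifier base estimators); the data set and the class/method
# names passed to export() are illustrative:
#
#   from sklearn.datasets import load_iris
#   from sklearn.ensemble import AdaBoostClassifier as SkAdaBoost
#   from sklearn.tree import DecisionTreeClassifier
#
#   X, y = load_iris(return_X_y=True)
#   est = SkAdaBoost(base_estimator=DecisionTreeClassifier(max_depth=4),
#                    n_estimators=20, algorithm='SAMME.R').fit(X, y)
#   porter = AdaBoostClassifier(est, target_language='java')
#   java_src = porter.export(class_name='AdaBoostClassifier',
#                            method_name='predict')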
| mit |
htimko/ArcPIC | pic2d/GLEanalysis/2Dpic_current_PerProcess.py | 1 | 17220 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2015 CERN and Helsinki Institute of Physics.
# This software is distributed under the terms of the
# GNU General Public License version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md. In applying this
# license, CERN does not waive the privileges and immunities granted to it
# by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
#
# Project website: http://arcpic.web.cern.ch/
# Developers: Helga Timko, Kyrre Sjobak
#
# 2Dpic_current_PerProcess.py:
# Plot current per injection process as a function of time
# Data provided by model 'ArcOriginalNewHS',
# which writes file 'arcbound_original.dat' in the expected format.
#
import sys, os, shutil
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib.colors import LogNorm
#from matplotlib.ticker import LogFormatter
#from matplotlib import gridspec
from matplotlib import rcParams, rc
rcParams.update({'text.usetex': True})
PLOTOPTS = 1
if PLOTOPTS == 1: #spread_24x6um_RC_1740V_1pF_1000ohm_bf2_halfgrid
DPI = 500
rcParams.update({'savefig.dpi':DPI})
rc('font',**{'family':'serif','serif':['Times'],'size':8})
textwidth = 449.40363*1.0/72.27 #inches, for 2Dpic paper
FIGSIZE = (0.5*0.95*textwidth, 0.5*0.98*textwidth/1.61803398875)
elif PLOTOPTS == 2: #spread_24x6um_RC_1740V_1pF_1000ohm_bf2_halfgrid_Y2
DPI = 100#500
rcParams.update({'savefig.dpi':DPI})
rc('font',**{'family':'serif','serif':['Times'],'size':8})
textwidth = 449.40363*1.0/72.27 #inches, for 2Dpic paper
FIGSIZE = None#(0.5*0.95*textwidth, 0.5*0.98*textwidth/1.61803398875)
else:
DPI = 300
rcParams.update({'savefig.dpi':DPI})
FIGSIZE = None
#Get the scaling setup
MODLOAD_parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,MODLOAD_parentdir)
from calcScaling import InputFile
inputfile = InputFile("../input.txt")
inputfile.calcBasicParams()
if inputfile.ArcBoundaryName != "ArcOriginalNewHS":
print "ArcBoundaryName must be 'ArcOriginalNewHS' for this analysis to work."
print
print "Usage: 2Dpic_current_PerProcess.py smoothsteps"
smoothsteps = None
if len(sys.argv) == 2:
smoothsteps = int(sys.argv[1])
print "- smoothsteps =", smoothsteps
else:
print "Please specify smoothsteps."
exit(1)
print
def patchArray(ts, columns,patchmode='zero',dt=1):
#Used to "patch" up an array with missing data, i.e. if the disk ran full during simulation
assert dt == 1
fragmentIdx = [0]
prevTS = ts[0]
for i in xrange(1,len(ts)):
if ts[i] != prevTS+dt:
print "\t Found jump != dt, prevTS="+str(prevTS)+", ts[i]="+str(ts[i])+", i=",i
fragmentIdx.append(i)
prevTS = ts[i]
#fragmentIdx.append(len(i)-1)
newTS = []
newColumns = []
for col in columns:
newColumns.append([])
newColumns = tuple(newColumns)
#Patch together fragments and fillers
print "\t fragmentIdx = " + str(fragmentIdx) + ", len(ts)=" + str(len(ts))
for i in xrange(1,len(fragmentIdx)):
print "\t appending, i=" + str(i),
newTS += list(ts[fragmentIdx[i-1]:fragmentIdx[i]])
addRange = range(ts[fragmentIdx[i]-1],ts[fragmentIdx[i]],dt)
newTS += addRange
print "len(addRange) = " + str(len(addRange))
for (colOld,colNew) in zip(columns,newColumns):
colNew += list(colOld[fragmentIdx[i-1]:fragmentIdx[i]])
if patchmode=='zero':
colNew += [0]*(len(addRange))
#tail of arrays
newTS += list(ts[fragmentIdx[-1]:])
for (colOld,colNew) in zip(columns,newColumns):
colNew += list(colOld[fragmentIdx[-1]:])
return (np.asarray(newTS), map(np.asarray, newColumns) )
pass
#Read arcbound_original.dat
print "Reading arcbound_original.dat"
(ts, emit_tip, emit_flat, emit_SEY, emit_evap, emit_sput_cat, emit_sput_ano, emit_htspk) = np.loadtxt("../arcbounds_original.dat", dtype='int', converters={2:lambda x: None}, usecols=(0,3,5,6,7,8,9,10), unpack=True)
if PLOTOPTS == 2:
print "patching data..."
(ts, (emit_tip, emit_flat, emit_SEY, emit_evap, emit_sput_cat, emit_sput_ano, emit_htspk)) = patchArray(ts, (emit_tip, emit_flat, emit_SEY, emit_evap, emit_sput_cat, emit_sput_ano, emit_htspk))
print "done."
print
#Read circuit.dat
print "Reading circuit.dat"
(circ_ts, circ_deltaQ, circ_U) = np.loadtxt("../circuit.dat", usecols=(0,1,2), unpack=True)
if PLOTOPTS == 2:
print "patching data..."
(circ_ts, (circ_deltaQ, circ_U)) = patchArray(circ_ts, (circ_deltaQ, circ_U))
print "done."
print
#Read arcbound.dat
print "Reading arcbound.dat"
(ab_ts, ab_removed_e, ab_removed_i,ab_removed_n_cat,ab_removed_n_ano,ab_removed_n_r) = np.loadtxt("../arcbounds.dat", usecols=(0,9,15,18,19,20), unpack=True)
if PLOTOPTS == 2:
print "patching data..."
(ab_ts, (ab_removed_e, ab_removed_i,ab_removed_n_cat,ab_removed_n_ano,ab_removed_n_r)) = patchArray(ab_ts, (ab_removed_e, ab_removed_i,ab_removed_n_cat,ab_removed_n_ano,ab_removed_n_r))
print "done."
print
#Read mainstats.dat
print "Reading mainStats.dat"
(ms_ts, ms_numNeutrals) = np.loadtxt("../mainStats.dat", usecols=(0,5), unpack=True)
if PLOTOPTS == 2:
print "patching data..."
(ms_ts, (ms_numNeutrals,)) = patchArray(ms_ts, (ms_numNeutrals,))
print "done."
print
#Ionization rate
print "Calculating ionization rate..."
assert inputfile.n2inj_step == inputfile.dt_ion
print "dt_ion =", inputfile.dt_ion
delta_numNeutrals = np.diff([0]+list(ms_numNeutrals[::inputfile.dt_ion]))
ionizations = (emit_evap[::inputfile.dt_ion]+emit_sput_cat[::inputfile.dt_ion]+emit_sput_ano[::inputfile.dt_ion]+emit_htspk[::inputfile.dt_ion] - ab_removed_n_cat[::inputfile.dt_ion]-ab_removed_n_ano[::inputfile.dt_ion]-ab_removed_n_r[::inputfile.dt_ion] - delta_numNeutrals)/float(inputfile.dt_ion)
print "done."
print
#Important utility functions
def smooth(data,steps=smoothsteps, endFix=True):
"Boxcar window smoother"
#print "Convolving..."
if smooth.kernel==None or len(smooth.kernel) != steps:
print "Creating kernel with steps =", \
steps, "=", steps*inputfile.dT*1e9, "ns at dt,", steps*inputfile.dT*1e9*inputfile.dt_ion, "ns at dt_ion"
smooth.kernel = np.ones(steps)/float(steps)
if len(data) < steps:
print "Error: len(data) < steps!"
exit(1)
ret = np.convolve(data,smooth.kernel,'same')
if endFix:
#print ret
overhang1 = int(steps)/2
overhang2 = steps-overhang1-1
#print overhang1, overhang2
for i in xrange(overhang1):
ret[i] *= float(steps) / float( steps - overhang1 + i )
for i in xrange(overhang2):
ret[-i-1] *= float(steps) / float( steps - overhang2 + i )
#print ret
#print "done."
return ret
smooth.kernel=None
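# Illustrative note (comments only): smooth() is a plain boxcar (moving
# average) filter whose window is renormalized near the array ends, so the
# output has the same length as the input. For example, with steps=3,
#   smooth(np.array([0., 0., 3., 0., 0.]), steps=3)  ->  [0., 1., 1., 1., 0.]
# i.e. each recorded count is spread evenly over the 3 neighbouring steps.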
def backfill(data,steps):
assert type(data) == np.ndarray #this type passed by reference
fSteps = float(steps)
#Normalize first point
data[0] /= fSteps
#Backfill
i = steps
while i < len(data):
DATA = data[i]/fSteps
j = i-steps+1
for k in xrange(j,i+1):
data[k] = DATA
i += steps
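# Illustrative note (comments only): backfill() spreads each value recorded
# every `steps`-th timestep evenly, in place, over the preceding `steps`
# timesteps. For example, with steps=4,
#   data = np.array([4., 0., 0., 0., 8., 0., 0., 0.])
#   backfill(data, 4)   # data becomes [1., 2., 2., 2., 2., 0., 0., 0.]
# (the very first sample is only divided by `steps`; a trailing partial chunk,
# here the last three entries, is left untouched).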
print "backfilling..."
dt_ion = inputfile.dt_ion
assert ts[1]-ts[0] == 1 #dt_out
assert inputfile.e2inj_step == 1
assert inputfile.n2inj_step == dt_ion
backfill(emit_evap, dt_ion)
backfill(emit_sput_cat, dt_ion)
backfill(emit_sput_ano, dt_ion)
backfill(emit_htspk, dt_ion)
backfill(ab_removed_i, dt_ion)
backfill(ab_removed_n_cat, dt_ion)
backfill(ab_removed_n_ano, dt_ion)
backfill(ab_removed_n_r, dt_ion)
print "Done."
convFactor1 = inputfile.N_sp/inputfile.dT #superpartices/timestep -> particles/sec
convFactor2 = 1.60217657e-19*inputfile.N_sp/inputfile.dT # superparticles/timestep -> Amps
convFactor3 = (inputfile.dz/inputfile.Omega_pe)**2*inputfile.T_ref #dimless potential -> Volts
print "Smoothing..."
(emit_tip_smooth, emit_flat_smooth, emit_SEY_smooth, \
emit_evap_smooth, emit_sput_cat_smooth, emit_sput_ano_smooth, emit_htspk_smooth, ab_removed_e_smooth, ab_removed_i_smooth, ab_removed_n_cat_smooth,ab_removed_n_ano_smooth,ab_removed_n_r_smooth) = \
map(smooth, (emit_tip, emit_flat, emit_SEY, emit_evap, emit_sput_cat, emit_sput_ano, emit_htspk, ab_removed_e, ab_removed_i, ab_removed_n_cat,ab_removed_n_ano,ab_removed_n_r) )
circ_deltaQ_smooth = smooth(circ_deltaQ)
ionizations_smooth = smooth(ionizations,int(1.0*smoothsteps/inputfile.dt_ion))
ionizations_time = ms_ts[::inputfile.dt_ion]*inputfile.dT*1e9
print "Done."
t = ts*inputfile.dT*1e9 #ns
circ_t = circ_ts*inputfile.dT*1e9 #ns
ab_t = ab_ts*inputfile.dT*1e9 #ns
# plt.figure(1) #Particle currents (particles/sec)
# plt.plot(t, emit_tip*convFactor1, label="Tip")
# plt.plot(t, emit_flat*convFactor1, label="Flat")
# plt.plot(t, emit_SEY*convFactor1, label="SEY")
# plt.plot(t, emit_evap*convFactor1, label="Evap")
# plt.plot(t, emit_sput_cat*convFactor1, label="Sput (cat)")
# plt.plot(t, emit_sput_ano*convFactor1, label="Sput (ano)")
# plt.plot(t, emit_htspk*convFactor1, label="Heatspike")
# plt.plot(circ_t, circ_deltaQ*convFactor1, "r--", label="Circuit current")
# plt.title("Cathode")
# plt.xlabel("t [ns]")
# plt.ylabel("particles/sec.")
# plt.legend(loc=0)
# plt.figure(2,dpi=DPI,figsize=FIGSIZE) #Particle currents (particles/sec) (smoothed)
# plt.plot(t, emit_tip_smooth*convFactor1, label="Tip")
# plt.plot(t, emit_flat_smooth*convFactor1, label="Flat")
# plt.plot(t, emit_SEY_smooth*convFactor1, label="SEY")
# plt.plot(t, emit_evap_smooth*convFactor1, label="Evap")
# plt.plot(t, emit_sput_cat_smooth*convFactor1, label="Sput (cat)")
# plt.plot(t, emit_sput_ano_smooth*convFactor1, label="Sput (ano)")
# plt.plot(t, emit_htspk_smooth*convFactor1, label="Heatspike")
# plt.plot(circ_t, circ_deltaQ_smooth*convFactor1, "r--", label="Circuit current")
# plt.title("Cathode, smoothing = %d steps = %.3f ps " %
# (smoothsteps, smoothsteps*inputfile.dT*1e12) )
# plt.xlabel("t [ns]")
# plt.ylabel("particles/sec.")
# plt.legend(loc=0)
plt.figure(3,dpi=DPI,figsize=FIGSIZE) #Neutral particle currents (particles/sec) (smoothed)
line1, = plt.plot(t, emit_evap_smooth*convFactor1, label="Evaporation")
line2, = plt.plot(t, emit_sput_cat_smooth*convFactor1, label="Sputtering from cathode")
line3, = plt.plot(t, emit_sput_ano_smooth*convFactor1, label="Sputtering from anode")
line4, = plt.plot(t, emit_htspk_smooth*convFactor1, label="High-flux sputtering")
#line5, = plt.plot(t, -ab_removed_n_cat_smooth*convFactor1, label="Removed (cathode)")
#line6, = plt.plot(t, -ab_removed_n_ano_smooth*convFactor1, label="Removed (anode)")
#line7, = plt.plot(t, -ab_removed_n_r_smooth*convFactor1, label="Removed (radial)")
if PLOTOPTS == 1:
#Problematic when missing different amounts of data in different files
line8, = plt.plot(ab_t, +(ab_removed_n_cat_smooth+ab_removed_n_ano_smooth+ab_removed_n_r_smooth)*convFactor1,
label="Removed at walls \n (no arrow)")
line9, = plt.plot(ionizations_time, +ionizations_smooth*convFactor1, label="Ionizations")
#
#line10, = plt.plot(circ_t, circ_deltaQ_smooth*convFactor1, "r--", label="Circuit current")
# plt.title("Cathode, smoothing = %d steps = %.3f ps " %
# (smoothsteps, smoothsteps*inputfile.dT*1e12) )
plt.xlabel("Time [ns]")
plt.ylabel("Particles injected or removed /sec.")
plt.legend(loc=2,frameon=False,fontsize=8)
if PLOTOPTS == 1:
plt.subplots_adjust(right=0.99, left=0.13, top=0.92, bottom=0.16)
plt.xlim(0.8,2.1)
plt.annotate(
r"Evap.", xy = (1.74,6.3e17), xycoords='data',
color=line1.get_color(), xytext=(-5, +20), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='center', verticalalignment='center',fontsize=8
)
plt.annotate(
r"Hi-f.sput.", xy = (1.74,5.7e17), xycoords='data',
color=line4.get_color(), xytext=(-5, -20), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='center', verticalalignment='center',fontsize=8
)
plt.annotate(
r"Ionizations", xy = (1.79,1.05e18), xycoords='data',
color=line9.get_color(), xytext=(20, 20), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='center', verticalalignment='center',fontsize=8
)
plt.annotate(
r"Sput./ano.", xy = (2.05,8.0e16), xycoords='data',
color=line3.get_color(), xytext=(-11, 0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='bottom',fontsize=8
)
plt.annotate(
r"Sput./cat.", xy = (1.66,1.4e17), xycoords='data',
color=line2.get_color(), xytext=(-11, 0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='center',fontsize=8
)
plt.savefig("pngs/current_PerProcess_neutralFluxes_smoothed.png",dpi=DPI)
# plt.figure(4) #Charged particle currents (amps)
# plt.plot(t, emit_tip*convFactor2, label="Tip")
# plt.plot(t, emit_flat*convFactor2, label="Flat")
# plt.plot(t, emit_SEY*convFactor2, label="SEY")
# plt.plot(circ_t, circ_deltaQ*convFactor2, "--", label="Circuit current")
# plt.title("Cathode")
# plt.xlabel("t [ns]")
# plt.ylabel("Current [A]")
# plt.legend(loc=0)
plt.figure(5, dpi=DPI,figsize=FIGSIZE) #Particle currents (amps) (smoothed)
line1, = plt.plot(t, emit_tip_smooth*convFactor2, label="Tip")
line2, = plt.plot(t, emit_flat_smooth*convFactor2, label="Flat")
line3, = plt.plot(t, emit_SEY_smooth*convFactor2, label="SEY")
line4, = plt.plot(ab_t, ab_removed_i_smooth*convFactor2, label=r"Removed Cu\textsuperscript{+}")
line5, = plt.plot(ab_t, -1*ab_removed_e_smooth*convFactor2,label=r"Removed e\textsuperscript{-}")
line6, = plt.plot(circ_t, circ_deltaQ_smooth*convFactor2, ":", label=r"$I_\mathrm{circ}$")
# plt.title("Cathode, smoothing = %d steps = %.3f ps " %
# (smoothsteps, smoothsteps*inputfile.dT*1e12) )
plt.xlabel("Time [ns]")
plt.ylabel("Current [A]")
plt.legend(loc=2,frameon=False,ncol=1,fontsize=8)
if PLOTOPTS == 1:
#plt.subplots_adjust(right=0.99, left=0.13, top=0.97, bottom=0.16)
plt.subplots_adjust(right=0.88, left=0.13, top=0.92, bottom=0.16) #make room for extra y-axis
plt.xlim(0.8,2.1)
plt.annotate(
r"Flat", xy = (1.4,1.9), xycoords='data',
color=line2.get_color(), xytext=(0, -12), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='center', verticalalignment='center',fontsize=8
)
plt.annotate(
r"$I_\mathrm{circ}$", xy = (1.47,2.5), xycoords='data',
color=line6.get_color(), xytext=(0, +20), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='center', verticalalignment='center',fontsize=8
)
plt.annotate(
r"Tip", xy = (1.93,0.33), xycoords='data',
color=line1.get_color(), xytext=(-10, +20), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='center',fontsize=8
)
plt.annotate(
r"Removed e\textsuperscript{-}", xy = (1.62,-0.5), xycoords='data',
color=line5.get_color(), xytext=(-10, -10), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='right', verticalalignment='center',fontsize=8
)
plt.annotate(
r"Removed Cu\textsuperscript{+}", xy = (1.65,0.0), xycoords='data',
color=line4.get_color(), xytext=(0, 17), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='center', verticalalignment='center',fontsize=8
)
plt.annotate(
r"SEY", xy = (1.75,0.0), xycoords='data',
color=line3.get_color(), xytext=(10, -18), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),
horizontalalignment='left', verticalalignment='center',fontsize=8
)
#right-hand y-axis with alternative units
print "Adding right hand y-axis"
(xmin,xmax,ymin,ymax) = plt.axis()
ax1=plt.gca()
ax1.yaxis.tick_left()
ax2 = ax1.twinx()
plt.ylabel("Particles injected or removed/sec.")
ax2.yaxis.tick_right()
plt.axis((xmin,xmax,ymin/1.60217657e-19,ymax/1.60217657e-19))
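# Dividing the left-axis limits (Amps) by the elementary charge (1.60217657e-19 C)
# relabels the right-hand axis in particles injected or removed per second.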
plt.savefig("pngs/current_PerProcess_cathodeAmps_smoothed.png",dpi=DPI)
# plt.figure(6) #Current and voltage
# plt.gca().plot(circ_t, circ_deltaQ*convFactor2, "b-", label="Circuit current")
# plt.gca().set_ylabel("Current [A]", color='b')
# plt.gca().set_xlabel("t [ns]")
# for tl in plt.gca().get_yticklabels():
# tl.set_color('b')
# ax2 = plt.gca().twinx()
# ax2.plot(circ_t, circ_U*convFactor3, "r-", label="Circuit current")
# ax2.set_ylabel("Voltage [V]", color='r')
# for tl in ax2.get_yticklabels():
# tl.set_color('r')
#plt.show()
| gpl-3.0 |
andrewbolster/thesis | posters/PDW-15/figures/figgen.py | 1 | 1760 |
import matplotlib.pyplot as plt
import networkx as nx
import scipy.constants
w=5
cut = 1.06
nodedict = {"A":(25,35),"B":(30,30),"C":(25,25),"D":(35,25),"E":(35,35)}
direct = {'type':'Direct', 'color':'g', 'weight':4}
indirect = {'type':'Indirect', 'color':'r', 'weight':2}
recommend = {'type':'Recommendation', 'color':'y', 'weight':1}
edgelist = [("A","B", direct),
("A","C", direct),
("B","C", recommend),
("A","C", recommend),
("B","D", indirect),
("B","E", indirect),
]
G = nx.DiGraph()
G.add_nodes_from(nodedict.keys())
G.add_edges_from(edgelist, alpha = 0.5)
pos = nx.spring_layout(G)
for n,p in nodedict.iteritems():
G.node[n]['pos'] = p
nodelist = [ '#90EE90' if n == 'A' else 'w' for n in G.nodes() ]
nodealpha = [ 0.1 if n == 'A' else 1.0 for n in G.nodes() ]
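# Note: the spring-layout `pos` and the `nodealpha` list computed above are not used by
# the draw call below, which takes the hand-placed nodedict coordinates and a flat alpha.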
fig, ax = plt.subplots(1, 1, figsize=(w, w / scipy.constants.golden))
nx.draw_networkx(G, nodedict, arrows=True, ax=ax,
cmap = plt.get_cmap('jet'), node_size = 2000, width=4,
node_color=nodelist, edge_color=[e[2]['color'] for e in G.edges(data=True)],
alpha = 1.0, font_size = 20)
nx.draw_networkx_edge_labels(G,nodedict,{(k1,k2):e['type'] for k1,k2,e in G.edges_iter(data=True)},
font_size=10)
ax.set_axis_off()
xmax= cut*max(xx for xx,yy in nodedict.values())
ymax= cut*max(yy for xx,yy in nodedict.values())
xmin= (1/cut)*min(xx for xx,yy in nodedict.values())
ymin= (1/cut)*min(yy for xx,yy in nodedict.values())
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
fig.tight_layout(pad=0.0)
fig.subplots_adjust(left=0.0, right=1, top=1, bottom=0)
fig.savefig('/home/bolster/src/thesis/posters/PDW-15/figures/node_relationships.png', bbox_inches="tight")
| epl-1.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/qt/console/rich_ipython_widget.py | 5 | 14894 | #-----------------------------------------------------------------------------
# Copyright (c) 2010, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Standard library imports.
from base64 import decodestring
import os
import re
# System library imports.
from IPython.external.qt import QtCore, QtGui
# Local imports
from IPython.utils.traitlets import Bool
from IPython.qt.svg import save_svg, svg_to_clipboard, svg_to_image
from .ipython_widget import IPythonWidget
class RichIPythonWidget(IPythonWidget):
""" An IPythonWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichIPythonWidget protected class variables.
_payload_source_plot = 'IPython.kernel.zmq.pylab.backend_payload.add_plot_payload'
_jpg_supported = Bool(False)
# Used to determine whether a given html export attempt has already
# displayed a warning about being unable to convert a png to svg.
_svg_warning_displayed = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichIPythonWidget.
"""
kw['kind'] = 'rich'
super(RichIPythonWidget, self).__init__(*args, **kw)
# Configure the ConsoleWidget HTML exporter for our formats.
self._html_exporter.image_tag = self._get_image_tag
# Dictionary for resolving document resource names to SVG data.
self._name_to_svg_map = {}
# Do we support jpg ?
# JPEG support can be provided by a Qt plugin, so assume
# it is not always available.
_supported_format = map(str, QtGui.QImageReader.supportedImageFormats())
self._jpg_supported = 'jpeg' in _supported_format
#---------------------------------------------------------------------------
# 'ConsoleWidget' public interface overrides
#---------------------------------------------------------------------------
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
Overridden in order to reset the _svg_warning_displayed flag prior
to the export running.
"""
self._svg_warning_displayed = False
super(RichIPythonWidget, self).export_html()
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name:
menu = QtGui.QMenu()
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = self._name_to_svg_map.get(name, None)
if svg is not None:
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
else:
menu = super(RichIPythonWidget, self)._context_menu_make(pos)
return menu
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _pre_image_append(self, msg, prompt_number):
""" Append the Out[] prompt and make the output nicer
Shared code for the branches of the following if statement
"""
self.log.debug("pyout: %s", msg.get('content', ''))
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
self._append_plain_text('\n', True)
def _handle_pyout(self, msg):
""" Overridden to handle rich data types, like SVG.
"""
if not self._hidden and self._is_from_this_session(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
metadata = msg['content']['metadata']
if 'image/svg+xml' in data:
self._pre_image_append(msg, prompt_number)
self._append_svg(data['image/svg+xml'], True)
self._append_html(self.output_sep2, True)
elif 'image/png' in data:
self._pre_image_append(msg, prompt_number)
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
self._append_html(self.output_sep2, True)
elif 'image/jpeg' in data and self._jpg_supported:
self._pre_image_append(msg, prompt_number)
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
self._append_html(self.output_sep2, True)
else:
# Default back to the plain text representation.
return super(RichIPythonWidget, self)._handle_pyout(msg)
def _handle_display_data(self, msg):
""" Overridden to handle rich data types, like SVG.
"""
if not self._hidden and self._is_from_this_session(msg):
self.flush_clearoutput()
source = msg['content']['source']
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
if 'image/svg+xml' in data:
self.log.debug("display: %s", msg.get('content', ''))
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif 'image/png' in data:
self.log.debug("display: %s", msg.get('content', ''))
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
elif 'image/jpeg' in data and self._jpg_supported:
self.log.debug("display: %s", msg.get('content', ''))
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
else:
# Default back to the plain text representation.
return super(RichIPythonWidget, self)._handle_display_data(msg)
#---------------------------------------------------------------------------
# 'RichIPythonWidget' protected interface
#---------------------------------------------------------------------------
def _append_jpg(self, jpg, before_prompt=False, metadata=None):
""" Append raw JPG data to the widget."""
self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)
def _append_png(self, png, before_prompt=False, metadata=None):
""" Append raw PNG data to the widget.
"""
self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)
def _append_svg(self, svg, before_prompt=False):
""" Append raw SVG data to the widget.
"""
self._append_custom(self._insert_svg, svg, before_prompt)
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = str(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
def _get_image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files may be
written (e.g., for linked images). If None, all images are to be
included inline.
format : "png"|"svg"|"jpg", optional [default "png"]
Format for returned or referenced images.
"""
if format in ("png","jpg"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if path is not None:
if not os.path.exists(path):
os.mkdir(path)
relpath = os.path.basename(path)
if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
"PNG"):
return '<img src="%s/qt_img%s.%s">' % (relpath,
match.group("name"),format)
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, format.upper())
buffer_.close()
return '<img src="data:image/%s;base64,\n%s\n" />' % (
format,re.sub(r'(.{60})',r'\1\n',str(ba.toBase64())))
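# The re.sub above wraps the base64 payload every 60 characters so the inline
# data-URI <img> tag in the exported HTML stays readable.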
elif format == "svg":
try:
svg = str(self._name_to_svg_map[match.group("name")])
except KeyError:
if not self._svg_warning_displayed:
QtGui.QMessageBox.warning(self, 'Error converting PNG to SVG.',
'Cannot convert PNG images to SVG, export with PNG figures instead. '
'If you want to export matplotlib figures as SVG, add '
'to your ipython config:\n\n'
'\tc.InlineBackend.figure_format = \'svg\'\n\n'
'And regenerate the figures.',
QtGui.QMessageBox.Ok)
self._svg_warning_displayed = True
return ("<b>Cannot convert PNG images to SVG.</b> "
"You must export this session with PNG images. "
"If you want to export matplotlib figures as SVG, add to your config "
"<span>c.InlineBackend.figure_format = 'svg'</span> "
"and regenerate the figures.")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
def _insert_jpg(self, cursor, jpg, metadata=None):
""" Insert raw PNG data into the widget."""
self._insert_img(cursor, jpg, 'jpg', metadata=metadata)
def _insert_png(self, cursor, png, metadata=None):
""" Insert raw PNG data into the widget.
"""
self._insert_img(cursor, png, 'png', metadata=metadata)
def _insert_img(self, cursor, img, fmt, metadata=None):
""" insert a raw image, jpg or png """
if metadata:
width = metadata.get('width', None)
height = metadata.get('height', None)
else:
width = height = None
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
if width and height:
image = image.scaled(width, height, transformMode=QtCore.Qt.SmoothTransformation)
elif width and not height:
image = image.scaledToWidth(width, transformMode=QtCore.Qt.SmoothTransformation)
elif height and not width:
image = image.scaledToHeight(height, transformMode=QtCore.Qt.SmoothTransformation)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
| apache-2.0 |
aetilley/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
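# Sanity check of the primal/dual ridge identity: with K = X X^T and
# dual_coef = (K + alpha*I)^-1 y, the primal weights satisfy coef = X^T dual_coef,
# so solving in either space must give the same coefficients.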
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
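# Minimizing sum_i w_i * (y_i - x_i . coef)^2 + alpha * ||coef||^2 is the same problem
# as unweighted ridge on the rescaled data (sqrt(w_i) * x_i, sqrt(w_i) * y_i), which is
# what the comparison below relies on.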
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
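# The efficient LOO above needs no refitting: for ridge, the leave-one-out residual is
# e_i = (y_i - yhat_i) / (1 - h_ii), where h_ii is the i-th diagonal of the hat matrix
# X (X^T X + alpha*I)^-1 X^T, so all n errors come from a single decomposition.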
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
great-expectations/great_expectations | tests/data_context/test_data_context_config_ui.py | 1 | 52921 | import copy
import os
from typing import Dict, Optional
import pytest
from great_expectations import DataContext
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
BaseStoreBackendDefaults,
DatabaseStoreBackendDefaults,
DataContextConfig,
DataContextConfigDefaults,
DataContextConfigSchema,
DatasourceConfig,
FilesystemStoreBackendDefaults,
GCSStoreBackendDefaults,
InMemoryStoreBackendDefaults,
S3StoreBackendDefaults,
)
from great_expectations.util import filter_properties_dict
"""
What does this test and why?
This file holds various tests to ensure that the UI functions as expected when creating a DataContextConfig object. They verify that the appropriate defaults are applied, including when the store_backend_defaults parameter is set.
"""
@pytest.fixture(scope="function")
def construct_data_context_config():
"""
Construct a DataContextConfig fixture given the modifications in the input parameters
Returns:
Dictionary representation of a DataContextConfig to compare in tests
"""
def _construct_data_context_config(
data_context_id: str,
datasources: Dict,
config_version: float = float(
DataContextConfigDefaults.DEFAULT_CONFIG_VERSION.value
),
expectations_store_name: str = DataContextConfigDefaults.DEFAULT_EXPECTATIONS_STORE_NAME.value,
validations_store_name: str = DataContextConfigDefaults.DEFAULT_VALIDATIONS_STORE_NAME.value,
evaluation_parameter_store_name: str = DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
checkpoint_store_name: str = DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_NAME.value,
plugins_directory: Optional[str] = None,
stores: Optional[Dict] = None,
validation_operators: Optional[Dict] = None,
data_docs_sites: Optional[Dict] = None,
):
if stores is None:
stores = copy.deepcopy(DataContextConfigDefaults.DEFAULT_STORES.value)
if data_docs_sites is None:
data_docs_sites = copy.deepcopy(
DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITES.value
)
return {
"config_version": config_version,
"datasources": datasources,
"expectations_store_name": expectations_store_name,
"validations_store_name": validations_store_name,
"evaluation_parameter_store_name": evaluation_parameter_store_name,
"checkpoint_store_name": checkpoint_store_name,
"plugins_directory": plugins_directory,
"validation_operators": validation_operators,
"stores": stores,
"data_docs_sites": data_docs_sites,
"notebooks": None,
"config_variables_file_path": None,
"anonymous_usage_statistics": {
"data_context_id": data_context_id,
"enabled": True,
},
}
return _construct_data_context_config
@pytest.fixture()
def default_pandas_datasource_config():
return {
"my_pandas_datasource": {
"batch_kwargs_generators": {
"subdir_reader": {
"base_directory": "../data/",
"class_name": "SubdirReaderBatchKwargsGenerator",
}
},
"class_name": "PandasDatasource",
"data_asset_type": {
"class_name": "PandasDataset",
"module_name": "great_expectations.dataset",
},
"module_name": "great_expectations.datasource",
}
}
@pytest.fixture()
def default_spark_datasource_config():
return {
"my_spark_datasource": {
"batch_kwargs_generators": {},
"class_name": "SparkDFDatasource",
"data_asset_type": {
"class_name": "SparkDFDataset",
"module_name": "great_expectations.dataset",
},
"module_name": "great_expectations.datasource",
}
}
def test_DataContextConfig_with_BaseStoreBackendDefaults_and_simple_defaults(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Ensure that a very simple DataContextConfig setup with many defaults is created accurately
and produces a valid DataContextConfig
"""
store_backend_defaults = BaseStoreBackendDefaults()
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
checkpoint_store_name=store_backend_defaults.checkpoint_store_name,
)
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_S3StoreBackendDefaults(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Make sure that using S3StoreBackendDefaults as the store_backend_defaults applies appropriate
defaults, including default_bucket_name getting propagated to all stores.
"""
store_backend_defaults = S3StoreBackendDefaults(
default_bucket_name="my_default_bucket"
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
desired_stores_config = {
"evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"expectations_S3_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "expectations",
},
},
"validations_S3_store": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "validations",
},
},
"checkpoint_S3_store": {
"class_name": "CheckpointStore",
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "checkpoints",
},
},
}
desired_data_docs_sites_config = {
"s3_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "data_docs",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="expectations_S3_store",
validations_store_name="validations_S3_store",
evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
checkpoint_store_name="checkpoint_S3_store",
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_S3StoreBackendDefaults_using_all_parameters(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Make sure that S3StoreBackendDefaults parameters are handled appropriately
E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed
"""
store_backend_defaults = S3StoreBackendDefaults(
default_bucket_name="custom_default_bucket_name",
expectations_store_bucket_name="custom_expectations_store_bucket_name",
validations_store_bucket_name="custom_validations_store_bucket_name",
data_docs_bucket_name="custom_data_docs_store_bucket_name",
checkpoint_store_bucket_name="custom_checkpoint_store_bucket_name",
expectations_store_prefix="custom_expectations_store_prefix",
validations_store_prefix="custom_validations_store_prefix",
data_docs_prefix="custom_data_docs_prefix",
checkpoint_store_prefix="custom_checkpoint_store_prefix",
expectations_store_name="custom_expectations_S3_store_name",
validations_store_name="custom_validations_S3_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="custom_checkpoint_S3_store_name",
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
module_name="great_expectations.datasource",
data_asset_type={
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
desired_stores_config = {
"custom_evaluation_parameter_store_name": {
"class_name": "EvaluationParameterStore"
},
"custom_expectations_S3_store_name": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "custom_expectations_store_bucket_name",
"class_name": "TupleS3StoreBackend",
"prefix": "custom_expectations_store_prefix",
},
},
"custom_validations_S3_store_name": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "custom_validations_store_bucket_name",
"class_name": "TupleS3StoreBackend",
"prefix": "custom_validations_store_prefix",
},
},
"custom_checkpoint_S3_store_name": {
"class_name": "CheckpointStore",
"store_backend": {
"bucket": "custom_checkpoint_store_bucket_name",
"class_name": "TupleS3StoreBackend",
"prefix": "custom_checkpoint_store_prefix",
},
},
}
desired_data_docs_sites_config = {
"s3_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"bucket": "custom_data_docs_store_bucket_name",
"class_name": "TupleS3StoreBackend",
"prefix": "custom_data_docs_prefix",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="custom_expectations_S3_store_name",
validations_store_name="custom_validations_S3_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="custom_checkpoint_S3_store_name",
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately
This test sets the root_dir parameter
"""
test_root_directory = "test_root_dir"
store_backend_defaults = FilesystemStoreBackendDefaults(
root_directory=test_root_directory
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
data_context_id = data_context_config.anonymous_usage_statistics.data_context_id
desired_config = construct_data_context_config(
data_context_id=data_context_id, datasources=default_pandas_datasource_config
)
# Add root_directory to stores and data_docs
desired_config["stores"][desired_config["expectations_store_name"]][
"store_backend"
]["root_directory"] = test_root_directory
desired_config["stores"][desired_config["validations_store_name"]]["store_backend"][
"root_directory"
] = test_root_directory
desired_config["stores"][desired_config["checkpoint_store_name"]]["store_backend"][
"root_directory"
] = test_root_directory
desired_config["data_docs_sites"]["local_site"]["store_backend"][
"root_directory"
] = test_root_directory
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_FilesystemStoreBackendDefaults_and_simple_defaults_no_root_directory(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Ensure that a very simple DataContextConfig setup using FilesystemStoreBackendDefaults is created accurately
This test does not set the optional root_directory parameter
"""
store_backend_defaults = FilesystemStoreBackendDefaults()
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
checkpoint_store_name=store_backend_defaults.checkpoint_store_name,
)
# Create desired config
data_context_id = data_context_config.anonymous_usage_statistics.data_context_id
desired_config = construct_data_context_config(
data_context_id=data_context_id, datasources=default_pandas_datasource_config
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_GCSStoreBackendDefaults(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Make sure that using GCSStoreBackendDefaults as the store_backend_defaults applies appropriate
defaults, including default_bucket_name & default_project_name getting propagated
to all stores.
"""
store_backend_defaults = GCSStoreBackendDefaults(
default_bucket_name="my_default_bucket",
default_project_name="my_default_project",
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
module_name="great_expectations.datasource",
data_asset_type={
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
data_context_id = data_context_config.anonymous_usage_statistics.data_context_id
desired_stores_config = {
"evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"expectations_GCS_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "my_default_bucket",
"project": "my_default_project",
"class_name": "TupleGCSStoreBackend",
"prefix": "expectations",
},
},
"validations_GCS_store": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "my_default_bucket",
"project": "my_default_project",
"class_name": "TupleGCSStoreBackend",
"prefix": "validations",
},
},
"checkpoint_GCS_store": {
"class_name": "CheckpointStore",
"store_backend": {
"bucket": "my_default_bucket",
"project": "my_default_project",
"class_name": "TupleGCSStoreBackend",
"prefix": "checkpoints",
},
},
}
desired_data_docs_sites_config = {
"gcs_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"bucket": "my_default_bucket",
"project": "my_default_project",
"class_name": "TupleGCSStoreBackend",
"prefix": "data_docs",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="expectations_GCS_store",
validations_store_name="validations_GCS_store",
checkpoint_store_name="checkpoint_GCS_store",
evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
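# A minimal usage sketch (not part of the test, names are illustrative): with
# GCSStoreBackendDefaults only the bucket/project need to be supplied and the
# expectations, validations and checkpoint stores plus the data docs site are
# all routed to TupleGCSStoreBackend, as asserted above:
#
#     minimal_config = DataContextConfig(
#         store_backend_defaults=GCSStoreBackendDefaults(
#             default_bucket_name="my_default_bucket",
#             default_project_name="my_default_project",
#         )
#     )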
def test_DataContextConfig_with_GCSStoreBackendDefaults_using_all_parameters(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Make sure that GCSStoreBackendDefaults parameters are handled appropriately
E.g. Make sure that default_bucket_name is ignored if individual bucket names are passed
"""
store_backend_defaults = GCSStoreBackendDefaults(
default_bucket_name="custom_default_bucket_name",
default_project_name="custom_default_project_name",
expectations_store_bucket_name="custom_expectations_store_bucket_name",
validations_store_bucket_name="custom_validations_store_bucket_name",
data_docs_bucket_name="custom_data_docs_store_bucket_name",
checkpoint_store_bucket_name="custom_checkpoint_store_bucket_name",
expectations_store_project_name="custom_expectations_store_project_name",
validations_store_project_name="custom_validations_store_project_name",
data_docs_project_name="custom_data_docs_store_project_name",
checkpoint_store_project_name="custom_checkpoint_store_project_name",
expectations_store_prefix="custom_expectations_store_prefix",
validations_store_prefix="custom_validations_store_prefix",
data_docs_prefix="custom_data_docs_prefix",
checkpoint_store_prefix="custom_checkpoint_store_prefix",
expectations_store_name="custom_expectations_GCS_store_name",
validations_store_name="custom_validations_GCS_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="custom_checkpoint_GCS_store_name",
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
module_name="great_expectations.datasource",
data_asset_type={
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
desired_stores_config = {
"custom_evaluation_parameter_store_name": {
"class_name": "EvaluationParameterStore"
},
"custom_expectations_GCS_store_name": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "custom_expectations_store_bucket_name",
"project": "custom_expectations_store_project_name",
"class_name": "TupleGCSStoreBackend",
"prefix": "custom_expectations_store_prefix",
},
},
"custom_validations_GCS_store_name": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "custom_validations_store_bucket_name",
"project": "custom_validations_store_project_name",
"class_name": "TupleGCSStoreBackend",
"prefix": "custom_validations_store_prefix",
},
},
"custom_checkpoint_GCS_store_name": {
"class_name": "CheckpointStore",
"store_backend": {
"bucket": "custom_checkpoint_store_bucket_name",
"project": "custom_checkpoint_store_project_name",
"class_name": "TupleGCSStoreBackend",
"prefix": "custom_checkpoint_store_prefix",
},
},
}
desired_data_docs_sites_config = {
"gcs_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"bucket": "custom_data_docs_store_bucket_name",
"project": "custom_data_docs_store_project_name",
"class_name": "TupleGCSStoreBackend",
"prefix": "custom_data_docs_prefix",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="custom_expectations_GCS_store_name",
validations_store_name="custom_validations_GCS_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="custom_checkpoint_GCS_store_name",
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_DatabaseStoreBackendDefaults(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Make sure that using DatabaseStoreBackendDefaults as the store_backend_defaults applies appropriate
defaults, including default_credentials getting propagated to stores and not data_docs
"""
store_backend_defaults = DatabaseStoreBackendDefaults(
default_credentials={
"drivername": "postgresql",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "65432",
"username": "ge_tutorials",
"password": "ge_tutorials",
"database": "ge_tutorials",
},
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
module_name="great_expectations.datasource",
data_asset_type={
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
desired_stores_config = {
"evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"expectations_database_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": {
"drivername": "postgresql",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "65432",
"username": "ge_tutorials",
"password": "ge_tutorials",
"database": "ge_tutorials",
},
},
},
"validations_database_store": {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": {
"drivername": "postgresql",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "65432",
"username": "ge_tutorials",
"password": "ge_tutorials",
"database": "ge_tutorials",
},
},
},
"checkpoint_database_store": {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": {
"drivername": "postgresql",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "65432",
"username": "ge_tutorials",
"password": "ge_tutorials",
"database": "ge_tutorials",
},
},
},
}
desired_data_docs_sites_config = {
"local_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"base_directory": "uncommitted/data_docs/local_site/",
"class_name": "TupleFilesystemStoreBackend",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="expectations_database_store",
validations_store_name="validations_database_store",
checkpoint_store_name="checkpoint_database_store",
evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
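# Note: as the expected config above shows, DatabaseStoreBackendDefaults routes
# only the expectations/validations/checkpoint stores to DatabaseStoreBackend;
# data docs still fall back to the local TupleFilesystemStoreBackend site.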
def test_DataContextConfig_with_DatabaseStoreBackendDefaults_using_all_parameters(
construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Make sure that DatabaseStoreBackendDefaults parameters are handled appropriately
E.g. Make sure that default_credentials is ignored if individual store credentials are passed
"""
store_backend_defaults = DatabaseStoreBackendDefaults(
default_credentials={
"drivername": "postgresql",
"host": os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost"),
"port": "65432",
"username": "ge_tutorials",
"password": "ge_tutorials",
"database": "ge_tutorials",
},
expectations_store_credentials={
"drivername": "custom_expectations_store_drivername",
"host": "custom_expectations_store_host",
"port": "custom_expectations_store_port",
"username": "custom_expectations_store_username",
"password": "custom_expectations_store_password",
"database": "custom_expectations_store_database",
},
validations_store_credentials={
"drivername": "custom_validations_store_drivername",
"host": "custom_validations_store_host",
"port": "custom_validations_store_port",
"username": "custom_validations_store_username",
"password": "custom_validations_store_password",
"database": "custom_validations_store_database",
},
checkpoint_store_credentials={
"drivername": "custom_checkpoint_store_drivername",
"host": "custom_checkpoint_store_host",
"port": "custom_checkpoint_store_port",
"username": "custom_checkpoint_store_username",
"password": "custom_checkpoint_store_password",
"database": "custom_checkpoint_store_database",
},
expectations_store_name="custom_expectations_database_store_name",
validations_store_name="custom_validations_database_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="custom_checkpoint_database_store_name",
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
module_name="great_expectations.datasource",
data_asset_type={
"module_name": "great_expectations.dataset",
"class_name": "PandasDataset",
},
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
desired_stores_config = {
"custom_evaluation_parameter_store_name": {
"class_name": "EvaluationParameterStore"
},
"custom_expectations_database_store_name": {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": {
"database": "custom_expectations_store_database",
"drivername": "custom_expectations_store_drivername",
"host": "custom_expectations_store_host",
"password": "custom_expectations_store_password",
"port": "custom_expectations_store_port",
"username": "custom_expectations_store_username",
},
},
},
"custom_validations_database_store_name": {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": {
"database": "custom_validations_store_database",
"drivername": "custom_validations_store_drivername",
"host": "custom_validations_store_host",
"password": "custom_validations_store_password",
"port": "custom_validations_store_port",
"username": "custom_validations_store_username",
},
},
},
"custom_checkpoint_database_store_name": {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "DatabaseStoreBackend",
"credentials": {
"database": "custom_checkpoint_store_database",
"drivername": "custom_checkpoint_store_drivername",
"host": "custom_checkpoint_store_host",
"password": "custom_checkpoint_store_password",
"port": "custom_checkpoint_store_port",
"username": "custom_checkpoint_store_username",
},
},
},
}
desired_data_docs_sites_config = {
"local_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"base_directory": "uncommitted/data_docs/local_site/",
"class_name": "TupleFilesystemStoreBackend",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="custom_expectations_database_store_name",
validations_store_name="custom_validations_database_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="custom_checkpoint_database_store_name",
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_override_general_defaults(
construct_data_context_config,
default_pandas_datasource_config,
default_spark_datasource_config,
):
"""
What does this test and why?
A DataContextConfig should be able to be created by passing items into the constructor that override any defaults.
It should also be able to handle multiple datasources, even if they are configured with a dictionary or a DatasourceConfig.
"""
data_context_config = DataContextConfig(
config_version=999,
plugins_directory="custom_plugins_directory",
config_variables_file_path="custom_config_variables_file_path",
datasources={
"my_spark_datasource": {
"data_asset_type": {
"class_name": "SparkDFDataset",
"module_name": "great_expectations.dataset",
},
"class_name": "SparkDFDatasource",
"module_name": "great_expectations.datasource",
"batch_kwargs_generators": {},
},
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "../data/",
}
},
),
},
stores={
"expectations_S3_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": "REPLACE_ME",
"prefix": "REPLACE_ME",
},
},
"expectations_S3_store2": {
"class_name": "ExpectationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": "REPLACE_ME",
"prefix": "REPLACE_ME",
},
},
"validations_S3_store": {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": "REPLACE_ME",
"prefix": "REPLACE_ME",
},
},
"validations_S3_store2": {
"class_name": "ValidationsStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": "REPLACE_ME",
"prefix": "REPLACE_ME",
},
},
"custom_evaluation_parameter_store": {
"class_name": "EvaluationParameterStore"
},
"checkpoint_S3_store": {
"class_name": "CheckpointStore",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": "REPLACE_ME",
"prefix": "REPLACE_ME",
},
},
},
expectations_store_name="custom_expectations_store_name",
validations_store_name="custom_validations_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="checkpoint_S3_store",
data_docs_sites={
"s3_site": {
"class_name": "SiteBuilder",
"store_backend": {
"class_name": "TupleS3StoreBackend",
"bucket": "REPLACE_ME",
},
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
},
"local_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"base_directory": "uncommitted/data_docs/local_site/",
"class_name": "TupleFilesystemStoreBackend",
},
},
},
validation_operators={
"custom_action_list_operator": {
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "custom_store_validation_result",
"action": {"class_name": "CustomStoreValidationResultAction"},
},
{
"name": "store_evaluation_params",
"action": {"class_name": "StoreEvaluationParametersAction"},
},
{
"name": "update_data_docs",
"action": {"class_name": "UpdateDataDocsAction"},
},
],
}
},
anonymous_usage_statistics={"enabled": True},
)
desired_stores = {
"custom_evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"expectations_S3_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "REPLACE_ME",
"class_name": "TupleS3StoreBackend",
"prefix": "REPLACE_ME",
},
},
"expectations_S3_store2": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "REPLACE_ME",
"class_name": "TupleS3StoreBackend",
"prefix": "REPLACE_ME",
},
},
"validations_S3_store": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "REPLACE_ME",
"class_name": "TupleS3StoreBackend",
"prefix": "REPLACE_ME",
},
},
"validations_S3_store2": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "REPLACE_ME",
"class_name": "TupleS3StoreBackend",
"prefix": "REPLACE_ME",
},
},
"checkpoint_S3_store": {
"class_name": "CheckpointStore",
"store_backend": {
"bucket": "REPLACE_ME",
"class_name": "TupleS3StoreBackend",
"prefix": "REPLACE_ME",
},
},
}
desired_data_docs_sites_config = {
"local_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"base_directory": "uncommitted/data_docs/local_site/",
"class_name": "TupleFilesystemStoreBackend",
},
},
"s3_site": {
"class_name": "SiteBuilder",
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"bucket": "REPLACE_ME",
"class_name": "TupleS3StoreBackend",
},
},
}
desired_validation_operators = {
"custom_action_list_operator": {
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "custom_store_validation_result",
"action": {"class_name": "CustomStoreValidationResultAction"},
},
{
"name": "store_evaluation_params",
"action": {"class_name": "StoreEvaluationParametersAction"},
},
{
"name": "update_data_docs",
"action": {"class_name": "UpdateDataDocsAction"},
},
],
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources={
**default_pandas_datasource_config,
**default_spark_datasource_config,
},
config_version=999.0,
expectations_store_name="custom_expectations_store_name",
validations_store_name="custom_validations_store_name",
evaluation_parameter_store_name="custom_evaluation_parameter_store_name",
checkpoint_store_name="checkpoint_S3_store",
stores=desired_stores,
validation_operators=desired_validation_operators,
data_docs_sites=desired_data_docs_sites_config,
plugins_directory="custom_plugins_directory",
)
desired_config["config_variables_file_path"] = "custom_config_variables_file_path"
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
def test_DataContextConfig_with_S3StoreBackendDefaults_and_simple_defaults_with_variable_sub(
monkeypatch, construct_data_context_config, default_pandas_datasource_config
):
"""
What does this test and why?
Ensure that a very simple DataContextConfig setup with many defaults is created accurately
and produces a valid DataContextConfig
"""
monkeypatch.setenv("SUBSTITUTED_BASE_DIRECTORY", "../data/")
store_backend_defaults = S3StoreBackendDefaults(
default_bucket_name="my_default_bucket"
)
data_context_config = DataContextConfig(
datasources={
"my_pandas_datasource": DatasourceConfig(
class_name="PandasDatasource",
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": "${SUBSTITUTED_BASE_DIRECTORY}",
}
},
)
},
store_backend_defaults=store_backend_defaults,
)
# Create desired config
desired_stores_config = {
"evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"expectations_S3_store": {
"class_name": "ExpectationsStore",
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "expectations",
},
},
"validations_S3_store": {
"class_name": "ValidationsStore",
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "validations",
},
},
"checkpoint_S3_store": {
"class_name": "CheckpointStore",
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "checkpoints",
},
},
}
desired_data_docs_sites_config = {
"s3_site": {
"class_name": "SiteBuilder",
"show_how_to_buttons": True,
"site_index_builder": {
"class_name": "DefaultSiteIndexBuilder",
},
"store_backend": {
"bucket": "my_default_bucket",
"class_name": "TupleS3StoreBackend",
"prefix": "data_docs",
},
}
}
desired_config = construct_data_context_config(
data_context_id=data_context_config.anonymous_usage_statistics.data_context_id,
datasources=default_pandas_datasource_config,
expectations_store_name="expectations_S3_store",
validations_store_name="validations_S3_store",
checkpoint_store_name="checkpoint_S3_store",
evaluation_parameter_store_name=DataContextConfigDefaults.DEFAULT_EVALUATION_PARAMETER_STORE_NAME.value,
stores=desired_stores_config,
data_docs_sites=desired_data_docs_sites_config,
)
desired_config["datasources"]["my_pandas_datasource"]["batch_kwargs_generators"][
"subdir_reader"
]["base_directory"] = "${SUBSTITUTED_BASE_DIRECTORY}"
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
data_context = BaseDataContext(project_config=data_context_config)
assert (
data_context.datasources["my_pandas_datasource"]
.get_batch_kwargs_generator("subdir_reader")
._base_directory
== "../data/"
)
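# The stored config keeps the raw "${SUBSTITUTED_BASE_DIRECTORY}" template (see
# desired_config above); substitution from the environment happens when the
# BaseDataContext is instantiated, which is what the final assertion verifies.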
def test_DataContextConfig_with_InMemoryStoreBackendDefaults(
construct_data_context_config,
):
store_backend_defaults = InMemoryStoreBackendDefaults()
data_context_config = DataContextConfig(
store_backend_defaults=store_backend_defaults,
)
desired_config = {
"anonymous_usage_statistics": {
"data_context_id": data_context_config.anonymous_usage_statistics.data_context_id,
"enabled": True,
},
"checkpoint_store_name": "checkpoint_store",
"config_version": 3.0,
"evaluation_parameter_store_name": "evaluation_parameter_store",
"expectations_store_name": "expectations_store",
"stores": {
"checkpoint_store": {
"class_name": "CheckpointStore",
"store_backend": {"class_name": "InMemoryStoreBackend"},
},
"evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"expectations_store": {
"class_name": "ExpectationsStore",
"store_backend": {"class_name": "InMemoryStoreBackend"},
},
"validations_store": {
"class_name": "ValidationsStore",
"store_backend": {"class_name": "InMemoryStoreBackend"},
},
},
"validations_store_name": "validations_store",
}
data_context_config_schema = DataContextConfigSchema()
assert filter_properties_dict(
properties=data_context_config_schema.dump(data_context_config),
clean_falsy=True,
) == filter_properties_dict(
properties=desired_config,
clean_falsy=True,
)
assert DataContext.validate_config(project_config=data_context_config)
| apache-2.0 |
ajaech/username_analytics | MyClassifier.py | 1 | 9130 | import code
import collections
import gzip
import math
import morfessor
import numpy
import pandas
import random
import segmenter
import sklearn.metrics
from matplotlib import pylab
random.seed(666)
model = None
def LoadUsernames(filename, maxload=400000000):
if filename.endswith('.gz'):
f = gzip.open(filename, 'r')
else:
f = open(filename, 'r')
usernames = []
for i, line in enumerate(f):
if i > maxload:
break
usernames.append(line.strip())
f.close()
random.shuffle(usernames)
return usernames
class BinaryClassifier:
alpha = 2.0 # smoothing parameter
def __init__(self, segfun, guy_morphs, girl_morphs,
guy_count, girl_count):
self.segfun = segfun
self.guy_morphs = guy_morphs
self.girl_morphs = girl_morphs
self.guy_count = guy_count
self.girl_count = girl_count
self.confidences = None
self.confidence_bins = None
@classmethod
def Train(cls, segfun, guy_train, girl_train):
guy_morphs = collections.defaultdict(int)
girl_morphs = collections.defaultdict(int)
tempfile = open('segments.txt', 'w')
for name in guy_train:
segments = segfun(name.lower())
msg = u'male: {0} -- {1}\n'.format(name.decode('utf8'), u'*'.join(segments))
tempfile.write(msg.encode('utf8'))
for seg in segments:
guy_morphs[seg] += 1
for name in girl_train:
segments = segfun(name.lower())
msg = u'female: {0} -- {1}\n'.format(name.decode('utf8'), u'*'.join(segments))
tempfile.write(msg.encode('utf8'))
for seg in segments:
girl_morphs[seg] += 1
tempfile.close()
return cls(segfun, guy_morphs, girl_morphs, len(guy_train),
len(girl_train))
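# TrainSemiSupervised below implements one round of self-training: each
# unlabeled name is soft-labeled with the current classifier's confidence,
# names the classifier is unsure about (|p - 0.5| < 0.1) are skipped, and the
# remaining soft counts are down-weighted by unlabeled_weight before being
# added to the morph counts of the new classifier.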
@classmethod
def TrainSemiSupervised(cls, usernames, classifier, unlabeled_weight = 0.3):
segfun = classifier.segfun
guy_morphs = collections.defaultdict(int, classifier.guy_morphs)
girl_morphs = collections.defaultdict(int, classifier.girl_morphs)
guy_count = classifier.guy_count
girl_count = classifier.girl_count
for name in usernames:
score, segments = classifier.Classify(name.lower(), return_segments=True)
guy_prob = classifier.GetConfidence(score)
girl_prob = 1.0 - guy_prob
# skip people that the classifier is uncertain about
if abs(guy_prob - 0.5) < 0.1:
continue
guy_prob *= unlabeled_weight
girl_prob *= unlabeled_weight
guy_count += guy_prob
girl_count += girl_prob
for seg in segments:
guy_morphs[seg] += guy_prob
girl_morphs[seg] += girl_prob
return cls(segfun, guy_morphs, girl_morphs, guy_count,
girl_count)
def GetTopRatios(self):
stats = []
all_morphs = set(self.guy_morphs.keys() + self.girl_morphs.keys())
print 'vocabulary size {0}'.format(len(all_morphs))
# compute the average morph length
avg_morph_len = sum([len(morph) for morph in all_morphs]) / float(len(all_morphs))
print 'average morph length {0}'.format(avg_morph_len)
morphs = [self.guy_morphs, self.girl_morphs]
class_totals = [sum(morphs[i].values()) + self.alpha * len(all_morphs)
for i in range(len(morphs))]
class_totals = [float(c) for c in class_totals]
for token in all_morphs:
guy_prob = (morphs[0][token] + self.alpha) / class_totals[0]
girl_prob = (morphs[1][token] + self.alpha) / class_totals[1]
ratio = numpy.log(guy_prob) - numpy.log(girl_prob)
stats.append({'morph': token, 'ratio': ratio, 'weight': numpy.abs(ratio),
'guy count': self.guy_morphs[token],
'girl count': self.girl_morphs[token]})
d = pandas.DataFrame(stats)
d.sort('weight', inplace=True, ascending=False)
print d[:20]
def TrainConfidenceEstimator(self, guy_names, girl_names):
guy_scores = [self.Classify(name) for name in guy_names]
girl_scores = [self.Classify(name) for name in girl_names]
total = sorted(guy_scores + girl_scores)
bins = numpy.percentile(total, range(0, 101, 10))
guy_bin_counts, _ = numpy.histogram(guy_scores, bins)
girl_bin_counts, _ = numpy.histogram(girl_scores, bins)
confidences = numpy.array(guy_bin_counts, dtype=float) / (
guy_bin_counts + girl_bin_counts)
self.confidences = confidences
self.confidence_bins = bins
def GetConfidence(self, score):
bin = numpy.histogram([score], self.confidence_bins)[0]
confidence = self.confidences[numpy.argmax(bin)]
return confidence
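# Classify returns a Naive Bayes log-odds score,
# log P(class A | segments) - log P(class B | segments), using the class priors
# and per-morph counts with add-alpha smoothing whose mass 2*alpha is split
# between the classes in proportion to the prior. Scores at or above the chosen
# threshold are assigned to the first (guy) class by the callers below.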
def Classify(self, username, return_segments=False, alpha=None,
oov_counter=None):
if alpha is None:
alpha = self.alpha
segments = self.segfun(username.lower())
p_of_c = float(self.guy_count) / float(self.girl_count + self.guy_count)
guy_prob = math.log(p_of_c)
girl_prob = math.log(1.0 - p_of_c)
guy_denom = 1.0 / (self.guy_count + 2 * alpha)
girl_denom = 1.0 / (self.girl_count + 2 * alpha)
for seg in segments:
guy_count = self.guy_morphs.get(seg, 0)
girl_count = self.girl_morphs.get(seg, 0)
if oov_counter is not None:
if guy_count + girl_count == 0:
oov_counter[seg] += 1
guy_prob += math.log((guy_count + 2.0 * p_of_c * alpha) * guy_denom)
girl_prob += math.log((girl_count + 2.0 * (1.0 - p_of_c) * alpha) * girl_denom)
score = guy_prob - girl_prob
if return_segments:
return score, segments
return score
def Partition(data, percents):
cutoffs = [int(math.floor(len(data) * p)) for p in numpy.cumsum(percents)]
return data[:cutoffs[0]], data[cutoffs[0]:cutoffs[1]], data[cutoffs[1]:]
def GetRocCurve(a_scores, b_scores):
all_scores = a_scores + b_scores
labels = [1 for _ in a_scores] + [-1 for _ in b_scores]
fpr, tpr, thresh = sklearn.metrics.roc_curve(labels, all_scores)
return fpr, tpr, thresh
def TestAccuracy(classifier, classA, classB, threshold):
oov_counts = collections.defaultdict(int)
all_morphs = set(classifier.guy_morphs.keys() + classifier.girl_morphs.keys())
print 'vocabulary size {0}'.format(len(all_morphs))
a_scores = [classifier.Classify(name, oov_counter=oov_counts) for name in classA]
b_scores = [classifier.Classify(name, oov_counter=oov_counts) for name in classB]
print 'total # of oovs types: {0} tokens: {1}'.format(
len(oov_counts), sum(oov_counts.values()))
num_correct = (numpy.array(b_scores) < threshold).sum() + (numpy.array(a_scores) >= threshold).sum()
acc = num_correct / float(len(classA) + len(classB))
return acc
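# GetOptimalThreshold sweeps a few smoothing values, finds the ROC point where
# the true-positive rate crosses 1 - fpr (the equal-error point) for each,
# keeps the alpha with the best accuracy there (stored on the classifier), and
# returns the corresponding score threshold.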
def GetOptimalThreshold(classifier, classA, classB):
smooth_levels = (1.0, 2.0, 5.0, 7.0, 9.0)
results = []
for alpha in smooth_levels:
a_scores = [classifier.Classify(name, alpha=alpha) for name in classA]
b_scores = [classifier.Classify(name, alpha=alpha) for name in classB]
fpr, tpr, thresh = GetRocCurve(a_scores, b_scores)
idx = (1.0 - fpr) < tpr
crossover = numpy.where(idx)[0].min()
acc = 0.5 * (tpr[crossover] + tpr[crossover-1])
auc = sklearn.metrics.auc(fpr, tpr)
results.append({'thresh': thresh[crossover], 'accuracy': acc,
'smooth': alpha})
data = pandas.DataFrame(results)
idx = numpy.argmax(data.accuracy)
classifier.alpha = data.smooth[idx]
return data.thresh[idx]
num_semisup = 2000000
snapchat_names = LoadUsernames('snapchat/test_usernames.txt.gz',
maxload=num_semisup)
print '{0} semisup names loaded'.format(len(snapchat_names))
def DoTest(classA, classB, unsupervised=False, balance=False,
use_baseline_segmenter=False):
classA = list(classA)
classB = list(classB)
if balance:
max_len = min(len(classA), len(classB))
classA = classA[:max_len]
classB = classB[:max_len]
random.shuffle(classA)
random.shuffle(classB)
percents = (0.2, 0.1, 0.7)
classA_test, classA_validation, classA_train = Partition(classA, percents)
classB_test, classB_validation, classB_train = Partition(classB, percents)
if use_baseline_segmenter:
seg_func = segmenter.baseline_segmenter
else:
seg_func = segmenter.morph_segmenter(model)
classifier = BinaryClassifier.Train(seg_func, classA_train, classB_train)
classifier.GetTopRatios()
thresh = GetOptimalThreshold(classifier,
classA_validation, classB_validation)
acc = TestAccuracy(classifier, classA_test, classB_test, thresh)
print 'test accuracy {0}'.format(acc)
if unsupervised:
semisup_classifier = classifier
for iter_num in range(3):
print 'semi-sup iter {0}'.format(iter_num)
semisup_classifier.TrainConfidenceEstimator(classA_validation, classB_validation)
semisup_classifier = BinaryClassifier.TrainSemiSupervised(snapchat_names,
semisup_classifier)
thresh = GetOptimalThreshold(semisup_classifier, classA_validation, classB_validation)
acc = TestAccuracy(semisup_classifier, classA_test, classB_test, thresh)
print 'accuracy {0}'.format(acc)
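# Example driver (a sketch only; the real calling notebook/script is not shown
# here and the file names are hypothetical):
#   guys = LoadUsernames('male_usernames.txt.gz')
#   girls = LoadUsernames('female_usernames.txt.gz')
#   DoTest(guys, girls, unsupervised=True, balance=True)
# Note that the module-level `model` must be set to a trained morfessor model
# before DoTest is called with use_baseline_segmenter=False (it is initialised
# to None above).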
| gpl-2.0 |
fredhusser/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
Adai0808/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
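# The experiment below: for a growing number of noise features (with the
# training-set size fixed at n_train), fit LDA with and without automatic
# shrinkage n_averages times and record the average test accuracy of each.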
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
dsm054/pandas | asv_bench/benchmarks/io/stata.py | 5 | 1526 | import numpy as np
from pandas import DataFrame, date_range, read_stata
import pandas.util.testing as tm
from ..pandas_vb_common import BaseIO
class Stata(BaseIO):
params = ['tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty']
param_names = ['convert_dates']
def setup(self, convert_dates):
self.fname = '__test__.dta'
N = 100000
C = 5
self.df = DataFrame(np.random.randn(N, C),
columns=['float{}'.format(i) for i in range(C)],
index=date_range('20000101', periods=N, freq='H'))
self.df['object'] = tm.makeStringIndex(N)
self.df['int8_'] = np.random.randint(np.iinfo(np.int8).min,
np.iinfo(np.int8).max - 27, N)
self.df['int16_'] = np.random.randint(np.iinfo(np.int16).min,
np.iinfo(np.int16).max - 27, N)
self.df['int32_'] = np.random.randint(np.iinfo(np.int32).min,
np.iinfo(np.int32).max - 27, N)
self.df['float32_'] = np.array(np.random.randn(N),
dtype=np.float32)
self.convert_dates = {'index': convert_dates}
self.df.to_stata(self.fname, self.convert_dates)
def time_read_stata(self, convert_dates):
read_stata(self.fname)
def time_write_stata(self, convert_dates):
self.df.to_stata(self.fname, self.convert_dates)
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
ngoix/OCRF | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
bavardage/statsmodels | statsmodels/sandbox/panel/mixed.py | 4 | 20977 | """
Mixed effects models
Author: Jonathan Taylor
Author: Josef Perktold
License: BSD-3
Notes
------
It's pretty slow if the model is misspecified, in my first example convergence
in loglike is not reached within 2000 iterations. Added stop criteria based
on convergence of parameters instead.
With correctly specified model, convergence is fast, in 6 iterations in
example.
"""
import numpy as np
import numpy.linalg as L
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.tools.decorators import cache_readonly
class Unit(object):
"""
Individual experimental unit for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
endog : ndarray, (nobs,)
response, endogenous variable
exog_fe : ndarray, (nobs, k_vars_fe)
explanatory variables as regressors or fixed effects,
should include exog_re to correct mean of random
coefficients, see Notes
exog_re : ndarray, (nobs, k_vars_re)
explanatory variables or random effects or coefficients
Notes
-----
If the exog_re variables are not included in exog_fe, then the
mean of the random constants or coefficients are not centered.
The covariance matrix of the random parameter estimates are not
centered in this case. (That's how it looks to me. JP)
"""
def __init__(self, endog, exog_fe, exog_re):
self.Y = endog
self.X = exog_fe
self.Z = exog_re
self.n = endog.shape[0]
def _compute_S(self, D, sigma):
"""covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.3) from Laird, Lange, Stram (see help(Unit))
"""
self.S = (np.identity(self.n) * sigma**2 +
np.dot(self.Z, np.dot(D, self.Z.T)))
def _compute_W(self):
"""inverse covariance of observations (nobs_i, nobs_i) (JP check)
Display (3.2) from Laird, Lange, Stram (see help(Unit))
"""
self.W = L.inv(self.S)
def compute_P(self, Sinv):
"""projection matrix (nobs_i, nobs_i) (M in regression ?) (JP check, guessing)
Display (3.10) from Laird, Lange, Stram (see help(Unit))
W - W X Sinv X' W'
"""
t = np.dot(self.W, self.X)
self.P = self.W - np.dot(np.dot(t, Sinv), t.T)
def _compute_r(self, alpha):
"""residual after removing fixed effects
Display (3.5) from Laird, Lange, Stram (see help(Unit))
"""
self.r = self.Y - np.dot(self.X, alpha)
def _compute_b(self, D):
"""coefficients for random effects/coefficients
Display (3.4) from Laird, Lange, Stram (see help(Unit))
D Z' W r
"""
self.b = np.dot(D, np.dot(np.dot(self.Z.T, self.W), self.r))
def fit(self, a, D, sigma):
"""
Compute unit specific parameters in
Laird, Lange, Stram (see help(Unit)).
Displays (3.2)-(3.5).
"""
self._compute_S(D, sigma) #random effect plus error covariance
self._compute_W() #inv(S)
self._compute_r(a) #residual after removing fixed effects/exogs
self._compute_b(D) #? coefficients on random exog, Z ?
def compute_xtwy(self):
"""
Utility function to compute X^tWY (transposed ?) for Unit instance.
"""
return np.dot(np.dot(self.W, self.Y), self.X) #is this transposed ?
def compute_xtwx(self):
"""
Utility function to compute X^tWX for Unit instance.
"""
return np.dot(np.dot(self.X.T, self.W), self.X)
def cov_random(self, D, Sinv=None):
"""
Approximate covariance of estimates of random effects. Just after
Display (3.10) in Laird, Lange, Stram (see help(Unit)).
D - D' Z' P Z D
Notes
-----
In example where the mean of the random coefficient is not zero, this
is not a covariance but a non-centered moment. (proof by example)
"""
if Sinv is not None:
self.compute_P(Sinv)
t = np.dot(self.Z, D)
return D - np.dot(np.dot(t.T, self.P), t)
def logL(self, a, ML=False):
"""
Individual contributions to the log-likelihood, tries to return REML
contribution by default though this requires estimated
fixed effect a to be passed as an argument.
no constant with pi included
a is not used if ML=true (should be a=None in signature)
If ML is false, then the residuals are calculated for the given fixed
effects parameters a.
"""
if ML:
return (np.log(L.det(self.W)) - (self.r * np.dot(self.W, self.r)).sum()) / 2.
else:
if a is None:
raise ValueError('need fixed effect a for REML contribution to log-likelihood')
r = self.Y - np.dot(self.X, a)
return (np.log(L.det(self.W)) - (r * np.dot(self.W, r)).sum()) / 2.
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return - 2 * self.logL(ML=ML)
class OneWayMixed(object):
"""
Model for
EM implementation of (repeated measures)
mixed effects model.
\'Maximum Likelihood Computations with Repeated Measures:
Application of the EM Algorithm\'
Nan Laird; Nicholas Lange; Daniel Stram
Journal of the American Statistical Association,
Vol. 82, No. 397. (Mar., 1987), pp. 97-105.
Parameters
----------
units : list of units
the data for the individual units should be attached to the units
response, fixed and random : formula expression, called as argument to Formula
*available results and alias*
(subject to renaming, and conversion to cached attributes)
params() -> self.a : coefficient for fixed effects or exog
cov_params() -> self.Sinv : covariance estimate of fixed effects/exog
bse() : standard deviation of params
cov_random -> self.D : estimate of random effects covariance
params_random_units -> [self.units[...].b] : random coefficient for each unit
*attributes*
(others)
self.m : number of units
self.p : k_vars_fixed
self.q : k_vars_random
self.N : nobs (total)
Notes
-----
Fit returns a result instance, but not all results that use the inherited
methods have been checked.
Parameters need to change: drop formula and we require a naming convention for
the units (currently Y,X,Z). - endog, exog_fe, endog_re ?
logL does not include constant, e.g. sqrt(pi)
llf is for MLE not for REML
convergence criteria for iteration
Currently convergence in the iterative solver is reached if either the loglikelihood
*or* the fixed effects parameter don't change above tolerance.
In some examples, the fixed effects parameters converged to 1e-5 within 150 iterations
while the log likelihood did not converge within 2000 iterations. This might be
the case if the fixed effects parameters are well estimated, but there are still
changes in the random effects. If params_rtol and params_atol are set at a higher
level, then the random effects might not be estimated to a very high precision.
The above was with a misspecified model, without a constant. With a
correctly specified model convergence is fast, within a few iterations
(6 in example).
"""
def __init__(self, units):
self.units = units
self.m = len(self.units)
self.n_units = self.m
self.N = sum(unit.X.shape[0] for unit in self.units)
self.nobs = self.N #alias for now
# Determine size of fixed effects
d = self.units[0].X
self.p = d.shape[1] # d.shape = p
self.k_exog_fe = self.p #alias for now
self.a = np.zeros(self.p, np.float64)
# Determine size of D, and sensible initial estimates
# of sigma and D
d = self.units[0].Z
self.q = d.shape[1] # Z.shape = q
self.k_exog_re = self.q #alias for now
self.D = np.zeros((self.q,)*2, np.float64)
self.sigma = 1.
self.dev = np.inf #initialize for iterations, move it?
def _compute_a(self):
"""fixed effects parameters
Display (3.1) of
Laird, Lange, Stram (see help(Mixed)).
"""
for unit in self.units:
unit.fit(self.a, self.D, self.sigma)
S = sum([unit.compute_xtwx() for unit in self.units])
Y = sum([unit.compute_xtwy() for unit in self.units])
self.Sinv = L.pinv(S)
self.a = np.dot(self.Sinv, Y)
def _compute_sigma(self, ML=False):
"""
Estimate sigma. If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.6) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.8).
sigma is the standard deviation of the noise (residual)
"""
sigmasq = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
t = unit.r - np.dot(unit.Z, unit.b)
sigmasq += np.power(t, 2).sum()
sigmasq += self.sigma**2 * np.trace(np.identity(unit.n) -
self.sigma**2 * W)
self.sigma = np.sqrt(sigmasq / self.N)
def _compute_D(self, ML=False):
"""
Estimate random effects covariance D.
If ML is True, return the ML estimate of sigma,
else return the REML estimate.
If ML, this is (3.7) in Laird, Lange, Stram (see help(Mixed)),
otherwise it corresponds to (3.9).
"""
D = 0.
for unit in self.units:
if ML:
W = unit.W
else:
unit.compute_P(self.Sinv)
W = unit.P
D += np.multiply.outer(unit.b, unit.b)
t = np.dot(unit.Z, self.D)
D += self.D - np.dot(np.dot(t.T, W), t)
self.D = D / self.m
def cov_fixed(self):
"""
Approximate covariance of estimates of fixed effects.
Just after Display (3.10) in Laird, Lange, Stram (see help(Mixed)).
"""
return self.Sinv
#----------- alias (JP) move to results class ?
def cov_random(self):
"""
Estimate random effects covariance D.
This is the ML or REML estimate depending on the options used in the preceding fit.
see _compute_D, alias for self.D
"""
return self.D
@property
def params(self):
'''
estimated coefficients for exogeneous variables or fixed effects
see _compute_a, alias for self.a
'''
return self.a
@property
def params_random_units(self):
'''random coefficients for each unit
'''
return np.array([unit.b for unit in self.units])
def cov_params(self):
'''
estimated covariance for coefficients for exogeneous variables or fixed effects
see cov_fixed, and Sinv in _compute_a
'''
return self.cov_fixed()
@property
def bse(self):
'''
standard errors of estimated coefficients for exogeneous variables (fixed)
'''
return np.sqrt(np.diag(self.cov_params()))
#----------- end alias
def deviance(self, ML=False):
'''deviance defined as 2 times the negative loglikelihood
'''
return -2 * self.logL(ML=ML)
def logL(self, ML=False):
"""
Return log-likelihood, REML by default.
"""
#I don't know what the difference between REML and ML is here.
logL = 0.
for unit in self.units:
logL += unit.logL(a=self.a, ML=ML)
if not ML:
logL += np.log(L.det(self.Sinv)) / 2
return logL
def initialize(self):
S = sum([np.dot(unit.X.T, unit.X) for unit in self.units])
Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units])
self.a = L.lstsq(S, Y)[0]
D = 0
t = 0
sigmasq = 0
for unit in self.units:
unit.r = unit.Y - np.dot(unit.X, self.a)
if self.q > 1:
unit.b = L.lstsq(unit.Z, unit.r)[0]
else:
Z = unit.Z.reshape((unit.Z.shape[0], 1))
unit.b = L.lstsq(Z, unit.r)[0]
sigmasq += (np.power(unit.Y, 2).sum() -
(self.a * np.dot(unit.X.T, unit.Y)).sum() -
(unit.b * np.dot(unit.Z.T, unit.r)).sum())
D += np.multiply.outer(unit.b, unit.b)
t += L.pinv(np.dot(unit.Z.T, unit.Z))
#TODO: JP added df_resid check
self.df_resid = (self.N - (self.m - 1) * self.q - self.p)
sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
self.sigma = np.sqrt(sigmasq)
self.D = (D - sigmasq * t) / self.m
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
'''convergence check for iterative estimation
'''
self.dev, old = self.deviance(ML=ML), self.dev
#self.history.append(np.hstack((self.dev, self.a)))
self.history['llf'].append(self.dev)
self.history['params'].append(self.a.copy())
self.history['D'].append(self.D.copy())
if np.fabs((self.dev - old) / self.dev) < rtol: #why is there times `*`?
#print np.fabs((self.dev - old)), self.dev, old
self.termination = 'llf'
return False
#break if parameters converged
#TODO: check termination conditions, OR or AND
if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
self.termination = 'params'
return False
self._a_old = self.a.copy()
return True
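# fit() below iterates the EM-style updates until either the deviance or the
# fixed-effects parameters stop changing: (1) _compute_a - GLS update of the
# fixed effects and their covariance, (2) _compute_sigma - update of the
# residual standard deviation, (3) _compute_D - update of the random-effects
# covariance (ML or REML variants).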
def fit(self, maxiter=100, ML=False, rtol=1.0e-05, params_rtol=1e-6, params_atol=1e-6):
#initialize for convergence criteria
self._a_old = np.inf * self.a
self.history = {'llf':[], 'params':[], 'D':[]}
for i in range(maxiter):
self._compute_a() #a, Sinv : params, cov_params of fixed exog
self._compute_sigma(ML=ML) #sigma MLE or REML of sigma ?
self._compute_D(ML=ML) #D : covariance of random effects, MLE or REML
if not self.cont(ML=ML, rtol=rtol, params_rtol=params_rtol,
params_atol=params_atol):
break
else: #if end of loop is reached without break
self.termination = 'maxiter'
print 'Warning: maximum number of iterations reached'
self.iterations = i
results = OneWayMixedResults(self)
#compatibility functions for fixed effects/exog
results.scale = 1
results.normalized_cov_params = self.cov_params()
return results
class OneWayMixedResults(LikelihoodModelResults):
'''Results class for OneWayMixed models
'''
def __init__(self, model):
#TODO: check, change initialization to more standard pattern
self.model = model
self.params = model.params
#need to overwrite this because we don't have a standard
#model.loglike yet
#TODO: what todo about REML loglike, logL is not normalized
@cache_readonly
def llf(self):
return self.model.logL(ML=True)
@property
def params_random_units(self):
return self.model.params_random_units
def cov_random(self):
return self.model.cov_random()
def mean_random(self, idx='lastexog'):
if idx == 'lastexog':
meanr = self.params[-self.model.k_exog_re:]
elif type(idx) == list:
if not len(idx) == self.model.k_exog_re:
raise ValueError('length of idx different from k_exog_re')
else:
meanr = self.params[idx]
else:
meanr = np.zeros(self.model.k_exog_re)
return meanr
def std_random(self):
return np.sqrt(np.diag(self.cov_random()))
def plot_random_univariate(self, bins=None, use_loc=True):
'''create plot of marginal distribution of random effects
Parameters
----------
bins : int or bin edges
option for bins in matplotlibs hist method. Current default is not
very sophisticated. All distributions use the same setting for
bins.
use_loc : bool
If True, then the distribution with mean given by the fixed
effect is used.
Returns
-------
fig : matplotlib figure instance
figure with subplots
Notes
-----
What can make this fancier?
Bin edges will not make sense if loc or scale differ across random
effect distributions.
'''
#outsource this
import matplotlib.pyplot as plt
from scipy.stats import norm as normal
fig = plt.figure()
k = self.model.k_exog_re
if k > 3:
rows, cols = int(np.ceil(k * 0.5)), 2
else:
rows, cols = k, 1
if bins is None:
#bins = self.model.n_units // 20 #TODO: just roughly, check
# bins = np.sqrt(self.model.n_units)
bins = 5 + 2 * self.model.n_units**(1./3.)
if use_loc:
loc = self.mean_random()
else:
loc = [0]*k
scale = self.std_random()
for ii in range(k):
ax = fig.add_subplot(rows, cols, ii + 1)  # subplot indices are 1-based
freq, bins_, _ = ax.hist(loc[ii] + self.params_random_units[:,ii],
bins=bins, normed=True)
points = np.linspace(bins_[0], bins_[-1], 200)
#ax.plot(points, normal.pdf(points, loc=loc, scale=scale))
#loc of sample is approx. zero, with Z appended to X
#alternative, add fixed to mean
ax.set_title('Random Effect %d Marginal Distribution' % ii)
ax.plot(points,
normal.pdf(points, loc=loc[ii], scale=scale[ii]),
'r')
return fig
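#Usage sketch (not part of the original module): `res` stands for a fitted
#OneWayMixedResults instance, e.g. the return value of OneWayMixed.fit above;
#the output file name is hypothetical.
#
#    fig = res.plot_random_univariate(bins=20, use_loc=True)
#    fig.savefig('random_effects_marginals.png')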
def plot_scatter_pairs(self, idx1, idx2, title=None, ax=None):
'''create scatter plot of two random effects
Parameters
----------
idx1, idx2 : int
indices of the two random effects to display, corresponding to
columns of exog_re
title : None or string
If None, then a default title is added
ax : None or matplotlib axis instance
If None, then a figure with one axis is created and returned.
If ax is not None, then the scatter plot is created on it, and
this axis instance is returned.
Returns
-------
ax_or_fig : axis or figure instance
see ax parameter
Notes
-----
Still needs ellipse from estimated parameters
'''
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax_or_fig = fig
re1 = self.params_random_units[:,idx1]
re2 = self.params_random_units[:,idx2]
ax.plot(re1, re2, 'o', alpha=0.75)
if title is None:
title = 'Random Effects %d and %d' % (idx1, idx2)
ax.set_title(title)
ax_or_fig = ax
return ax_or_fig
def plot_scatter_all_pairs(self, title=None):
from statsmodels.graphics.plot_grids import scatter_ellipse
if self.model.k_exog_re < 2:
raise ValueError('less than two variables available')
return scatter_ellipse(self.params_random_units,
ell_kwds={'color':'r'})
#ell_kwds not implemented yet
# #note I have written this already as helper function, get it
# import matplotlib.pyplot as plt
# #from scipy.stats import norm as normal
# fig = plt.figure()
# k = self.model.k_exog_re
# n_plots = k * (k - 1) // 2
# if n_plots > 3:
# rows, cols = int(np.ceil(n_plots * 0.5)), 2
# else:
# rows, cols = n_plots, 1
#
# count = 1
# for ii in range(k):
# for jj in range(ii):
# ax = fig.add_subplot(rows, cols, count)
# self.plot_scatter_pairs(ii, jj, title=None, ax=ax)
# count += 1
#
# return fig
if __name__ == '__main__':
#see examples/ex_mixed_lls_1.py
pass
| bsd-3-clause |
hhain/sdap17 | notebooks/pawel_ueb2/filters.py | 1 | 2457 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from features import *
from utility import *
## filter functions
def distortion_filter(data, column_key = "_ifft_0", label = 'target', std_thresh = 100.):
"""
Replaces distortions in the data by checking for a high rolling std.
Replacement is performed by duplicating the previous data entry.
Returns
-------
* corrected data
* conducted number of replacements in each trx
Remark
------
run create label before
"""
std_thresh_ = std_thresh
lst = data.columns[data.columns.str.contains(column_key)]
groups = [ [x[:-2]] for x in lst]
r_data = data.copy()
n_errors = {}
# for each groups of trxs
for trx_group in groups:
trx = trx_group[0]
#print(trx)
data_diff = cf_diff(data, column_key=trx, label='target')
data_sum_groups = rf_grouped(data_diff, groups=[[trx]], fn=rf_sum_abs_single, label='target')
data_std = cf_std_window(data_sum_groups, label='target', window=3)
sel_col = data_std.columns[data_std.columns.str.contains(trx)]
# in abs sum this is the 2nd index which needs to be corrected
df_thresh_ix = np.where(data_std[sel_col] > std_thresh_ )[0]
# list empty?
if len(df_thresh_ix)==0:
n_errors[trx] = 0
continue
df_thresh_ix_groups = get_contigous_borders(df_thresh_ix)
n_errors[trx] = len(df_thresh_ix_groups)
# note: we assume that each threshold exceeded is triggered by a single problematic measurement
# in the same window, hence there are 3 high stds |0 0 1|, |0 1 0| and |1 0 0| where 1 indicates the problematic measurement
# of course we only want to correct one measurement
for g in df_thresh_ix_groups:
# only a single element
if g[1] - g[0] == 0:
# replace only this element in original data
# select columns
r_data = cf_replace(data, column_key=trx, label="target", dest_index = g[0], src_index = g[0]-1)
else:
# replace 2nd element in original data
r_data = cf_replace(data, column_key=trx, label="target", dest_index = g[0]+1, src_index = g[0]-1)
return r_data, n_errors
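# Minimal self-contained illustration of the screening idea used above (not
# part of the original notebook code): a rolling std spikes around a single
# distorted sample, which is then replaced by its predecessor. The values and
# the threshold are made up.
def _distortion_filter_sketch():
    s = pd.Series([10., 11., 10., 500., 11., 10.])   # one distorted entry
    spikes = s.rolling(3).std() > 100.                # analogous to std_thresh
    bad = int(np.where(spikes)[0][0])                 # first flagged position
    s.iloc[bad] = s.iloc[bad - 1]                     # duplicate previous entry
    return s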
| mit |
neurosnap/mudicom | mudicom/image.py | 1 | 4474 | # -*- coding: utf-8 -*-
"""
mudicom.image
~~~~~~~~~~~~~
Primary image module that converts DICOM pixel data into a numpy array
as well as saving the image using Matplotlib or Pillow.
"""
import os.path
import sys
import numpy
import gdcm
class Image(object):
""" This class attempts to extract an image out of the DICOM file.
:param fname: Location and filename of DICOM file.
"""
def __init__(self, fname):
self.fname = fname
def __repr__(self):
return "<MudicomImage {0}>".format(self.fname)
def __str__(self):
return str(self.fname)
@property
def numpy(self):
""" Grabs image data and converts it to a numpy array """
# load GDCM's image reading functionality
image_reader = gdcm.ImageReader()
image_reader.SetFileName(self.fname)
if not image_reader.Read():
raise IOError("Could not read DICOM image")
pixel_array = self._gdcm_to_numpy(image_reader.GetImage())
return pixel_array
def _gdcm_to_numpy(self, image):
""" Converts a GDCM image to a numpy array.
:param image: GDCM.ImageReader.GetImage()
"""
gdcm_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy.uint16,
gdcm.PixelFormat.INT16: numpy.int16,
gdcm.PixelFormat.UINT32: numpy.uint32,
gdcm.PixelFormat.INT32: numpy.int32,
gdcm.PixelFormat.FLOAT32: numpy.float32,
gdcm.PixelFormat.FLOAT64: numpy.float64
}
pixel_format = image.GetPixelFormat().GetScalarType()
if pixel_format in gdcm_typemap:
self.data_type = gdcm_typemap[pixel_format]
else:
raise KeyError(str(pixel_format) +
" is not a supported pixel format")
#dimension = image.GetDimension(0), image.GetDimension(1)
self.dimensions = image.GetDimension(1), image.GetDimension(0)
gdcm_array = image.GetBuffer()
# GDCM returns char* as type str. This converts it to type bytes
if sys.version_info >= (3, 0):
gdcm_array = gdcm_array.encode(sys.getfilesystemencoding(), "surrogateescape")
# use float for accurate scaling
dimensions = image.GetDimensions()
result = numpy.frombuffer(gdcm_array, dtype=self.data_type).astype(float)
if len(dimensions) == 3:
# for cine (animations) there are 3 dims: x, y, number of frames
result.shape = dimensions[2], dimensions[0], dimensions[1]
else:
result.shape = dimensions
return result
def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None,
cmap=None, format=None, origin=None):
""" This method saves the image from a numpy array using matplotlib
:param fname: Location and name of the image file to be saved.
:param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
:param vmin: matplotlib vmin
:param vmax: matplotlib vmax
:param cmap: matplotlib color map
:param format: matplotlib format
:param origin: matplotlib origin
This method will return True if successful
"""
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from pylab import cm
if pixel_array is None:
pixel_array = self.numpy
if cmap is None:
cmap = cm.bone
fig = Figure(figsize=pixel_array.shape[::-1], dpi=1, frameon=False)
canvas = FigureCanvas(fig)
fig.figimage(pixel_array, cmap=cmap, vmin=vmin,
vmax=vmax, origin=origin)
fig.savefig(fname, dpi=1, format=format)
return True
def save_as_pil(self, fname, pixel_array=None):
""" This method saves the image from a numpy array using Pillow
(PIL fork)
:param fname: Location and name of the image file to be saved.
:param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
This method will return True if successful
"""
if pixel_array is None:
pixel_array = self.numpy
from PIL import Image as pillow
pil_image = pillow.fromarray(pixel_array.astype('uint8'))
pil_image.save(fname)
return True
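# Usage sketch based on the API above (not part of the original module; the
# DICOM and output file names are hypothetical):
#
#     from mudicom.image import Image
#     img = Image("example.dcm")
#     pixels = img.numpy               # pixel data as a numpy array
#     img.save_as_plt("slice.png")     # render via matplotlib
#     img.save_as_pil("slice.png")     # or save via Pillow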
| mit |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/ensemble/tests/test_gradient_boosting.py | 6 | 39791 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2*ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=1,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
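# A second, hypothetical monitor illustrating the same callback protocol as
# early_stopping_monitor above (iteration index, estimator, local variables);
# it is not used by any test in this module and assumes train_score_ is
# already populated up to stage i when the callback runs.
def small_improvement_monitor(i, est, locals):
    """Returns True once the last train-loss improvement drops below 1e-4."""
    return i > 0 and abs(est.train_score_[i] - est.train_score_[i - 1]) < 1e-4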
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0, max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
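# Note (not part of the original test module): the yield-based checks above,
# e.g. test_boston and test_sparse_input, follow the nose generator-test
# pattern; a typical invocation of this file would be
#     nosetests sklearn/ensemble/tests/test_gradient_boosting.py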
| gpl-2.0 |
frank-tancf/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
lbeltrame/mnegri-ov170 | programs/extract_shared_mutations.py | 1 | 20417 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Luca Beltrame <luca.beltrame@marionegri.it>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import argparse
from itertools import groupby
import os
import sys
from gemini import GeminiQuery
from pathlib import Path
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd
import sarge
import cruzdb
# This uses the public UCSC instance. In case MySQL is blocked, you can
# point it using a valid SQLalchemy URL to an existing database server.
ucsc = cruzdb.Genome(db="hg19")
pd.options.display.mpl_style = "default"
rc('font', **{'family': 'sans-serif',
'sans-serif': ['Liberation Sans']})
renames = {"ts": "transition", "tv": "transversion", "ins": "insertion",
"del": "deletion"}
# These work around proper gene association bugs (due to amplicon extending
# past boundaries, or annotation picking things on the wrong strand)
substitutions = {
"ZEB1-AS1": "ZEB1", # Different strand
"RHNO1": "FOXM1", # Different strand
"C3orf72": "FOXL2", # Different strand
# "MC1R": np.nan, # past the promoter
"ACAA1": "MYD88", # Different strand
"VIM-AS1": "VIM", # Different strand
"LOC100507424": "FOXM1", # Wrong annotation?
"MTOR-AS1": "MTOR",
"EGFR-AS1": "EGFR",
"WRAP53": "TP53",
"EPM2AIP1": "MLH1",
"C5orf22": "DROSHA",
"C9orf53": "CDKN2A",
"LYRM5": "KRAS",
"N4BP2L1": "BRCA2",
"RMDN3": "RAD51",
"NBR2": "BRCA1",
"CNTD2": "AKT2",
"HSCB": "CHEK2",
"NPAT": "ATM",
"MC1R": "TUBB3"
}
def generate_phenotypes(database):
query = GeminiQuery(database)
query_string = "SELECT name, phenotype FROM samples"
phenotypes = {1: list(), 2: list()}
query.run(query_string)
for row in query:
phenotypes[int(row["phenotype"])].append(row["name"])
return phenotypes
def get_nearby_gene(chrom, start, end):
nearest = ucsc.knearest("refFlat", chrom, start, end)
nearest = pd.Series([item.geneName for item in nearest])
nearest = nearest.apply(lambda x: substitutions[x]
if x in substitutions else x)
try:
nearest = nearest.drop_duplicates().item()
except Exception:
print(nearest.drop_duplicates())
raise
# assert len(nearest) == 1
return nearest
def check_multiple_alts(chrom, start, end, alt, samples, reference_file=None):
# FIXME: PyVCF can't be used as it loads the wrong coordinate with
# fetch()
if reference_file is None:
return alt
alt = alt.split(",")
if len(alt) == 1:
return alt[0]
region = "{0}:{1}-{2}".format(chrom, start, end)
sample_display = ",".join(samples)
bcftools = ("/usr/bin/bcftools view {0} -r {1} -a -s {2}"
" --exclude-uncalled -H")
bcftools = sarge.shell_format(bcftools, reference_file, region,
sample_display)
command = sarge.capture_both(bcftools)
mutation_table = pd.read_table(command.stdout, header=None,
names=["CHROM", "POS", "ID", "REF", "ALT"],
usecols=["CHROM", "POS", "ID", "REF",
"ALT"])
if mutation_table.empty:
# Try alternative approach: sometimes bcftools asserts when
# launched from sarge
import subprocess
with open(os.devnull) as null:
cmd = subprocess.Popen(bcftools, shell=True,
stdout=subprocess.PIPE,
stderr=null)
mutation_table = pd.read_table(cmd.stdout, header=None,
names=["CHROM", "POS", "ID",
"REF", "ALT"],
usecols=["CHROM", "POS", "ID",
"REF", "ALT"])
seen_alt = mutation_table["ALT"].item()
if len(seen_alt.split(",")) > 1:
message = ("Detected more than one allele for sample pair {}: {},"
" mutation position {}-{}, ref {}")
print(message.format(", ".join(samples), seen_alt, chrom, start,
mutation_table["REF"].item()),
file=sys.stderr)
# This is a false positive! Same locus but different ALTs
return
return seen_alt
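# For reference (hypothetical file, region and sample names): the bcftools
# command assembled above expands to something like
#     /usr/bin/bcftools view calls.vcf.gz -r chr3:178936090-178936091 \
#         -a -s sample_20001_T,sample_20001_N --exclude-uncalled -H
# i.e. restrict to the locus, subset to the sample pair, and drop uncalled
# genotypes before checking how many ALT alleles remain.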
def extract_shared_mutations(database, reference_file=None):
phenotypes = generate_phenotypes(database)
query = GeminiQuery(database)
query_string = ("SELECT chrom, start, end, gene, ref, alt, type, sub_type,"
"impact, codon_change, aa_change, vcf_id, cosmic_ids"
" FROM variants WHERE in_1kg=0")
query.run(query_string, show_variant_samples=True)
rows = list()
for row in query:
variants = row.variant_samples
if any(item in phenotypes[1] for item in variants) and any(
item in phenotypes[2] for item in variants):
valid_groups = list()
chrom, start, end, alt = (row["chrom"], row["start"],
row["end"], row["alt"])
# In the case of intergenic regions, get the name of the
# closest gene
if row["gene"] is None:
gene = get_nearby_gene(chrom, start, end)
# print "None subsituted with", gene
else:
gene = row["gene"]
for gid, group in groupby(variants, lambda x: x.split("_")[1]):
# Rename according to Pandora guidelines
# Starts with 0: 1 + number
# 3 digits: 20 + number
# 4 digits: 2 + number
if len(gid) < 5:
newgid = "2" + gid if len(gid) == 4 else "20" + gid
else:
newgid = gid
group = list(group)
if len(list(group)) == 2:
# Check if we have different ALT bases for the samples
# in the same pair. If this occurs, it is a false positive
# and should be discarded. To do so, we need a VCF file to
# query by base, otherwise we take the value as-is.
if reference_file is not None:
alt = check_multiple_alts(chrom, start, end, alt,
group, reference_file)
if alt is None:
# Biallelic site for pair - discard
continue
valid_groups.append(newgid)
cosmic_data = "Yes" if row["cosmic_ids"] else "No"
data = [chrom, start, end, gene, row["ref"], alt, row["type"],
row["sub_type"], row["impact"], row["codon_change"],
row["aa_change"], row["vcf_id"], cosmic_data]
if not valid_groups:
rows.append(data + [np.nan])
else:
for gid in valid_groups:
rows.append(data + [gid])
colnames = ["chrom", "start", "end", "gene", "ref", "alt", "type",
"sub_type", "impact", "codon_change", "aa_change",
"dbsnp_id", "in_cosmic", "variants_with_pairs"]
df = pd.DataFrame.from_records(rows, columns=colnames)
df.set_index(["chrom", "start", "end", "gene", "ref", "alt"], inplace=True)
# Get rid of loci without pairs
df = df.dropna(subset=["variants_with_pairs"])
return df
def colorbar_text(ax, text):
plt.setp(ax.get_yticklabels(), visible=False)
plt.setp(ax.get_xticklabels(), visible=False)
ax.axis('off')
ax.grid(False)
ax.text(1.2, 0.25, text, horizontalalignment="right")
def _set_axis_parameters(ax, dataframe, names=True):
# put the major ticks at the middle of each cell
ax.set_yticks(np.arange(dataframe.shape[0]) + 0.5,
minor=False)
ax.set_xticks(np.arange(dataframe.shape[1]) + 0.5,
minor=False)
ax.set_xlim((0, dataframe.shape[1]))
ax.set_ylim((0, dataframe.shape[0]))
ax.invert_yaxis()
ax.xaxis.tick_top()
plt.xticks(rotation=90)
if names:
ax.set_xticklabels(dataframe.columns, minor=False, rotation=90)
else:
ax.set_xticklabels([])
ax.set_yticklabels(dataframe.index, minor=False)
# Turn off all the ticks
ax = plt.gca()
for t in ax.xaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
for t in ax.yaxis.get_major_ticks():
t.tick1On = False
t.tick2On = False
ax.grid(False)
def clean_axis(ax):
"""Remove ticks, tick labels, and frame from axis"""
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for sp in ax.spines.values():
sp.set_visible(False)
def colorbar_text(ax, text):
ax.axis('off')
ax.grid(False)
ax.text(1.2, 0.25, text, horizontalalignment="right")
def generate_heatmap(dataframe_with_pairs, samplelist=None,
pathway_colortable=None,
histotype_colortable=None):
import matplotlib.gridspec as grid
plt.ioff()
dataframe = dataframe_with_pairs.reset_index()
grouper = dataframe.groupby("gene")
# Get counts per gene and per sample
counter = grouper["variants_with_pairs"].value_counts()
# index.levels[0] is the gene, index.levels[1] are pairs
# The new structure is a table with genes as rows and
# sample pairs as columns
new_dataframe = pd.DataFrame(index=counter.index.levels[0],
columns=counter.index.levels[1])
new_dataframe.fillna(0, inplace=True)
# Put the counts in their proper place: we iterate over a MultiIndex of
# gene, pair couples, along with the count (the value), and we just
# use the first two as a coordinate where to put the third
for rowid, row in counter.iteritems():
new_dataframe.loc[rowid[0], rowid[1]] = row
if samplelist is not None:
# Put missing samples in
missing = samplelist[~samplelist.isin(new_dataframe.columns)]
for item in missing:
new_dataframe[item] = 0
new_dataframe.sort_index(axis=1, inplace=True)
# Add pathway elements
fig = plt.figure(figsize=(15, 10))
if pathway_colortable is not None and histotype_colortable is not None:
present_genes = pathway_colortable[
pathway_colortable.index.isin(new_dataframe.index)]
present_samples = histotype_colortable.loc[new_dataframe.index]
# Same order as the color table
new_dataframe = new_dataframe[histotype_colortable.index.astype("str")]
gs = grid.GridSpec(3, 2, width_ratios=[0.5, 15],
height_ratios=[0.2, 0.2, 15])
pathway_ax = fig.add_subplot(gs[2, 0]) # Pathways
heatmap_ax = fig.add_subplot(gs[2, 1], sharey=pathway_ax) # Heatmap
histotype_ax = fig.add_subplot(gs[0, 1]) # Histotype bar
grade_ax = fig.add_subplot(gs[1, 1]) # Grade bar
# Those stay in the top row, and represent the labels next
# to the color bars
bartext_ax = fig.add_subplot(gs[0, 0]) # Histotype
gradetext_ax = fig.add_subplot(gs[1, 0]) # Grade
clean_axis(histotype_ax)
clean_axis(grade_ax)
clean_axis(bartext_ax)
clean_axis(gradetext_ax)
# These will be our labels on top of histotype and grade
create_colorbar(histotype_colortable, histotype_ax, False,
labels=True)
create_colorbar(histotype_colortable, grade_ax, False, "Grade_color",
labels=False)
# Labels for histotype and grade
colorbar_text(bartext_ax, "Histotype")
colorbar_text(gradetext_ax, "Grade")
new_dataframe = new_dataframe.loc[present_genes.index]
plt.setp(heatmap_ax.get_yticklabels(), visible=False)
create_colorbar(present_genes, pathway_ax)
else:
fig.add_subplot(1, 1, 1)
heatmap_ax = fig.axes[0]
cmap, norm = create_colormap()
heatmap = heatmap_ax.pcolor(new_dataframe, cmap=cmap,
edgecolors="black", alpha=1, norm=norm)
labels = True if pathway_colortable is None else False
_set_axis_parameters(heatmap_ax, new_dataframe, names=labels)
cax = fig.add_axes([-0.05, 1.025, 0.15, 0.025])
cbar = fig.colorbar(heatmap, cax=cax, orientation="horizontal",
ticks=range(5)) # HACK: Hardcoded!
cbar.solids.set_edgecolor("face")
if pathway_colortable is not None:
plt.tight_layout()
return fig, cax
def _reduce_value(value):
if value.name != "samples":
# Get the unique name, should be identical
return value.unique().item()
return ", ".join(value)
def _correct_names(name):
name = name.split("_")[1]
if len(name) < 5:
name = "2" + name if len(name) == 4 else "20" + name
return name
def create_colorbar(dataframe, ax, row=True, color_col="Color", labels=True):
gene_names = dataframe.index
colors = dataframe[color_col]
length = list(range(len(dataframe.index)))
matrix, cmap = color_list_to_matrix_and_cmap(colors, length, row=row)
mesh = ax.pcolormesh(matrix, cmap=cmap)
if not row:
ax.set_xlim(0, matrix.shape[1])
ax.set_ylim((0, matrix.shape[0]))
ax.set_xticks(np.arange(matrix.shape[1]) + 0.5,
minor=False)
else:
ax.set_xlim(0, matrix.shape[1])
ax.set_ylim((0, matrix.shape[0]))
ax.set_yticks(np.arange(matrix.shape[0]) + 0.5,
minor=False)
if not row:
ax.invert_yaxis()
ax.xaxis.tick_top()
if labels:
ax.set_xticklabels(gene_names, minor=False, rotation=90)
else:
ax.set_xticklabels([])
ax.set_yticks([])
else:
# We invert gene_names because in this case the x axis is flipped
# (heatmap)
if labels:
ax.set_yticklabels(gene_names[::-1], minor=False)
else:
ax.set_yticklabels([])
ax.set_xticks([])
return mesh
def color_list_to_matrix_and_cmap(colors, ind, row=True):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
For 'heatmap()'
This only works for 1-column color lists.
These arguments can now be plotted using matplotlib.pcolormesh(matrix,
cmap) and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
row : bool
Is this to label the rows or columns? Default True.
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
cmap : matplotlib.colors.ListedColormap
This function was taken from https://github.com/olgabot/seaborn and
is under a BSD license.
"""
# TODO: Support multiple color labels on an element in the heatmap
import matplotlib as mpl
colors_original = colors
colors = set(colors)
col_to_value = dict((col, i) for i, col in enumerate(colors))
matrix = np.array([col_to_value[col] for col in colors_original])[ind]
# Is this row-side or column side?
if row:
# shape of matrix: nrows x 1
new_shape = (len(colors_original), 1)
else:
# shape of matrix: 1 x ncols
new_shape = (1, len(colors_original))
matrix = matrix.reshape(new_shape)
cmap = mpl.colors.ListedColormap(colors)
return matrix, cmap
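# Usage sketch for the helper above (kept as a comment so the script's
# behaviour is unchanged; the colour values are illustrative only):
#   colors = ["#ff0000", "#ff0000", "#00ff00", "#0000ff"]
#   matrix, cmap = color_list_to_matrix_and_cmap(colors, list(range(len(colors))))
#   plt.pcolormesh(matrix, cmap=cmap)  # one coloured cell per input colour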
def create_colormap():
# HACK: max is hardcoded!
norm = colors.Normalize(vmin=0, vmax=4)
# define the colormap
cmap = plt.cm.autumn
# extract all colors from the colormap
cmaplist = [cmap(i) for i in range(cmap.N)]
# force the first color entry to be grey
cmaplist[0] = (.5, .5, .5, 1.0)
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
return cmap, norm
def save_figure(fig, destination, extra_artist=None):
from pathlib import Path
name = Path(destination)
if extra_artist is not None:
extra_args = {"extra_artists": (extra_artist, ),
"bbox_inches": "tight"}
else:
extra_args = {}
for extension in [".pdf", ".svgz", ".png"]:
if extension != "png":
fig.savefig(str(name.with_suffix(extension)), **extra_args)
else:
fig.savefig(str(name.with_suffix(extension)), dpi=300,
**extra_args)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--reference-file", help="Reference VCF file")
parser.add_argument("--heatmap", help="Plot a heatmap",
action="store_true")
parser.add_argument("-c", "--color-table",
help="Add a pathway color table")
parser.add_argument("-f", "--phenotype-table",
help="Add a phenotype/grade color table")
parser.add_argument("--keep-synonymous", action="store_true",
help="Keep synonymous mutations in the table")
parser.add_argument("database", help="Source GEMINI database")
parser.add_argument("destination", help="Destination table to save to")
options = parser.parse_args()
destination = Path(options.destination)
print("Extracting shared mutations...", file=sys.stderr)
df = extract_shared_mutations(options.database, options.reference_file)
if not options.keep_synonymous:
df = df[df.impact != "synonymous_coding"]
else:
print("Including synonymous mutations.", file=sys.stderr)
df.to_csv(str(destination), sep="\t", na_rep="NA")
print("Saved text file: {}".format(destination), file=sys.stderr)
# User-visible file: rename the column to make more sense
xls_df = df.rename(columns={"variants_with_pairs": "samples"})
grouper = xls_df.groupby(level=[0, 1, 2, 3, 4, 5])
# Put all common samples for one mutation on one line
xls_df = grouper.aggregate(_reduce_value)
# Rename for clearer understanding
xls_df.sub_type = xls_df.sub_type.apply(
lambda x: renames.get(x, "unknown"))
xlsx_destination = destination.with_suffix(".xlsx")
xls_df.to_excel(str(xlsx_destination), sheet_name="Result",
na_rep="NA", merge_cells=False)
print("Saved xlsx file: {}".format(xlsx_destination), file=sys.stderr)
if options.heatmap:
samplelist = generate_phenotypes(options.database)
samplelist = pd.Series(samplelist[1])
samplelist = samplelist.apply(_correct_names).drop_duplicates()
if options.color_table is not None:
colortable = pd.read_table(options.color_table, index_col=0)
else:
colortable = None
if options.phenotype_table is not None:
phenotype_table = pd.read_table(options.phenotype_table,
index_col=0)
else:
phenotype_table = None
fig, cax = generate_heatmap(df, samplelist, colortable,
phenotype_table)
save_figure(fig, destination, extra_artist=cax)
if __name__ == '__main__':
main()
| gpl-2.0 |
evgchz/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 9 | 15843 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
"""Check classification on a toy dataset."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
"""Check consistency on dataset iris."""
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
"""Check pickability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
"""Test different base estimators."""
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
"""Check classification with sparse input."""
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
"""Check regression with sparse input."""
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(probability=True),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(probability=True),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
BryanCutler/spark | python/pyspark/pandas/tests/test_config.py | 1 | 6435 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import pandas as ps
from pyspark.pandas import config
from pyspark.pandas.config import Option, DictWrapper
from pyspark.pandas.testing.utils import ReusedSQLTestCase
class ConfigTest(ReusedSQLTestCase):
def setUp(self):
config._options_dict["test.config"] = Option(key="test.config", doc="", default="default")
config._options_dict["test.config.list"] = Option(
key="test.config.list", doc="", default=[], types=list
)
config._options_dict["test.config.float"] = Option(
key="test.config.float", doc="", default=1.2, types=float
)
config._options_dict["test.config.int"] = Option(
key="test.config.int",
doc="",
default=1,
types=int,
check_func=(lambda v: v > 0, "bigger then 0"),
)
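# check_func is a (validator, error message) pair; set_option raises
# ValueError with that message when the validator rejects the value
# (see test_check_func below).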
config._options_dict["test.config.int.none"] = Option(
key="test.config.int", doc="", default=None, types=(int, type(None))
)
def tearDown(self):
ps.reset_option("test.config")
del config._options_dict["test.config"]
del config._options_dict["test.config.list"]
del config._options_dict["test.config.float"]
del config._options_dict["test.config.int"]
del config._options_dict["test.config.int.none"]
def test_get_set_reset_option(self):
self.assertEqual(ps.get_option("test.config"), "default")
ps.set_option("test.config", "value")
self.assertEqual(ps.get_option("test.config"), "value")
ps.reset_option("test.config")
self.assertEqual(ps.get_option("test.config"), "default")
def test_get_set_reset_option_different_types(self):
ps.set_option("test.config.list", [1, 2, 3, 4])
self.assertEqual(ps.get_option("test.config.list"), [1, 2, 3, 4])
ps.set_option("test.config.float", 5.0)
self.assertEqual(ps.get_option("test.config.float"), 5.0)
ps.set_option("test.config.int", 123)
self.assertEqual(ps.get_option("test.config.int"), 123)
self.assertEqual(ps.get_option("test.config.int.none"), None) # default None
ps.set_option("test.config.int.none", 123)
self.assertEqual(ps.get_option("test.config.int.none"), 123)
ps.set_option("test.config.int.none", None)
self.assertEqual(ps.get_option("test.config.int.none"), None)
def test_different_types(self):
with self.assertRaisesRegex(ValueError, "was <class 'int'>"):
ps.set_option("test.config.list", 1)
with self.assertRaisesRegex(ValueError, "however, expected types are"):
ps.set_option("test.config.float", "abc")
with self.assertRaisesRegex(ValueError, "[<class 'int'>]"):
ps.set_option("test.config.int", "abc")
with self.assertRaisesRegex(ValueError, "(<class 'int'>, <class 'NoneType'>)"):
ps.set_option("test.config.int.none", "abc")
def test_check_func(self):
with self.assertRaisesRegex(ValueError, "bigger then 0"):
ps.set_option("test.config.int", -1)
def test_unknown_option(self):
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.get_option("unknown")
with self.assertRaisesRegex(config.OptionError, "Available options"):
ps.set_option("unknown", "value")
with self.assertRaisesRegex(config.OptionError, "test.config"):
ps.reset_option("unknown")
def test_namespace_access(self):
try:
self.assertEqual(ps.options.compute.max_rows, ps.get_option("compute.max_rows"))
ps.options.compute.max_rows = 0
self.assertEqual(ps.options.compute.max_rows, 0)
self.assertTrue(isinstance(ps.options.compute, DictWrapper))
wrapper = ps.options.compute
self.assertEqual(wrapper.max_rows, ps.get_option("compute.max_rows"))
wrapper.max_rows = 1000
self.assertEqual(ps.options.compute.max_rows, 1000)
self.assertRaisesRegex(config.OptionError, "No such option", lambda: ps.options.compu)
self.assertRaisesRegex(
config.OptionError, "No such option", lambda: ps.options.compute.max
)
self.assertRaisesRegex(
config.OptionError, "No such option", lambda: ps.options.max_rows1
)
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.options.compute.max = 0
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.options.compute = 0
with self.assertRaisesRegex(config.OptionError, "No such option"):
ps.options.com = 0
finally:
ps.reset_option("compute.max_rows")
def test_dir_options(self):
self.assertTrue("compute.default_index_type" in dir(ps.options))
self.assertTrue("plotting.sample_ratio" in dir(ps.options))
self.assertTrue("default_index_type" in dir(ps.options.compute))
self.assertTrue("sample_ratio" not in dir(ps.options.compute))
self.assertTrue("default_index_type" not in dir(ps.options.plotting))
self.assertTrue("sample_ratio" in dir(ps.options.plotting))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_config import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Extintor/piva | practica4/p4script5.py | 1 | 1108 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 6 18:04:51 2016
@author: paul
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
def turn_to_grayscale(img):
try:
red = img[:,:,0]
green = img[:,:,1]
blue = img[:,:,2]
img = np.uint8(np.add(np.add(red*0.299,green*0.587),blue*0.114))
except:
pass
return img
def cercle(s,radi):
y,x = np.ogrid[-s[0]/2: s[0]/2, -s[1]/2: s[1]/2]
mascara = (x**2 + y**2 < radi**2)
return mascara
if __name__ == "__main__":
img = turn_to_grayscale(plt.imread('../Imatges/imatge16.jpg'))
plt.figure()
plt.imshow(np.absolute(img),cmap="gray")
print img.shape
fftimg = np.fft.fft2(img)
fftimg = np.fft.fftshift(fftimg)
plt.figure()
plt.imshow(np.absolute(np.log(fftimg)))
radi = 80
idx = cercle(img.shape,radi)
plt.figure()
plt.imshow(idx)
fftimg = idx*fftimg
fftimg = np.fft.ifftshift(fftimg)
img = np.fft.ifft2(fftimg)
plt.figure()
plt.imshow(np.absolute(img),cmap="gray")
| gpl-3.0 |
ltiao/networkx | examples/graph/unix_email.py | 26 | 2678 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of MultiDiGraph to hold edge data
of arbitrary Python objects (in this case a list of email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| bsd-3-clause |
automl/paramsklearn | tests/components/feature_preprocessing/test_select_percentile_classification.py | 1 | 4693 | import unittest
import numpy as np
import scipy.sparse
import sklearn.preprocessing
from ParamSklearn.components.feature_preprocessing.select_percentile_classification import SelectPercentileClassification
from ParamSklearn.util import _test_preprocessing, get_dataset
class SelectPercentileClassificationTest(unittest.TestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(SelectPercentileClassification)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], int(original.shape[1]/2))
self.assertFalse((transformation == 0).all())
transformation, original = _test_preprocessing(SelectPercentileClassification, make_sparse=True)
self.assertTrue(scipy.sparse.issparse(transformation))
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], int(original.shape[1]/2))
# Custom preprocessing test to check if clipping to zero works
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
original_X_train = X_train.copy()
ss = sklearn.preprocessing.StandardScaler()
X_train = ss.fit_transform(X_train)
configuration_space = SelectPercentileClassification.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = SelectPercentileClassification(random_state=1,
**{hp_name: default[hp_name] for hp_name in
default if default[hp_name] is not None})
transformer = preprocessor.fit(X_train, Y_train)
transformation, original = transformer.transform(X_train), original_X_train
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], int(original.shape[1] / 2))
def test_preprocessing_dtype(self):
# Dense
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset("iris")
self.assertEqual(X_train.dtype, np.float32)
configuration_space = SelectPercentileClassification.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = SelectPercentileClassification(random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset("iris")
X_train = X_train.astype(np.float64)
configuration_space = SelectPercentileClassification.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = SelectPercentileClassification(random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float64)
# Sparse
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True)
self.assertEqual(X_train.dtype, np.float32)
configuration_space = SelectPercentileClassification.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = SelectPercentileClassification(random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True)
X_train = X_train.astype(np.float64)
configuration_space = SelectPercentileClassification.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = SelectPercentileClassification(random_state=1,
**{hp_name: default[hp_name]
for hp_name in default})
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float64)
| bsd-3-clause |
Myasuka/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
wlamond/scikit-learn | examples/mixture/plot_concentration_prior.py | 16 | 5657 | """
========================================================================
Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture
========================================================================
This example plots the ellipsoids obtained from a toy dataset (mixture of three
Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a
Dirichlet distribution prior
(``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet
process prior (``weight_concentration_prior_type='dirichlet_process'``). On
each figure, we plot the results for three different values of the weight
concentration prior.
The ``BayesianGaussianMixture`` class can adapt its number of mixture
components automatically. The parameter ``weight_concentration_prior`` has a
direct link with the resulting number of components with non-zero weights.
Specifying a low value for the concentration prior will make the model put most
of the weight on a few components and set the remaining components' weights
very close to zero. High values of the concentration prior will allow a larger
number of components to be active in the mixture.
The Dirichlet process prior allows one to define an infinite number of components
and automatically selects the correct number of components: it activates a
component only if it is necessary.
On the contrary the classical finite mixture model with a Dirichlet
distribution prior will favor more uniformly weighted components and therefore
tends to divide natural clusters into unnecessary sub-components.
"""
# Author: Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
for n in range(means.shape[0]):
eig_vals, eig_vecs = np.linalg.eigh(covars[n])
unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0])
angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0])
# Ellipse needs degrees
angle = 180 * angle / np.pi
# eigenvector normalization
eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals)
ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1],
180 + angle)
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ell.set_facecolor('#56B4E9')
ax.add_artist(ell)
def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False):
ax1.set_title(title)
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for k, w in enumerate(estimator.weights_):
ax2.bar(k, w, width=0.9, color='#56B4E9', zorder=3,
align='center')
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left='off',
right='off', labelleft='off')
ax2.tick_params(axis='x', which='both', top='off')
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
# Parameters of the dataset
random_state, n_components, n_features = 2, 3, 2
colors = np.array(['#0072B2', '#F0E442', '#D55E00'])
covars = np.array([[[.7, .0], [.0, .1]],
[[.5, .0], [.0, .1]],
[[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
[.0, .0],
[.0, .70]])
# mean_precision_prior= 0.8 to minimize the influence of the prior
estimators = [
("Finite mixture with a Dirichlet distribution\nprior and "
r"$\gamma_0=$", BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_distribution",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [0.001, 1, 1000]),
("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$",
BayesianGaussianMixture(
weight_concentration_prior_type="dirichlet_process",
n_components=2 * n_components, reg_covar=0, init_params='random',
max_iter=1500, mean_precision_prior=.8,
random_state=random_state), [1, 1000, 100000])]
# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
rng.multivariate_normal(means[j], covars[j], samples[j])
for j in range(n_components)])
y = np.concatenate([j * np.ones(samples[j], dtype=int)
for j in range(n_components)])
# Plot results in two different figures
for (title, estimator, concentrations_prior) in estimators:
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05,
left=.03, right=.99)
gs = gridspec.GridSpec(3, len(concentrations_prior))
for k, concentration in enumerate(concentrations_prior):
estimator.weight_concentration_prior = concentration
estimator.fit(X)
plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator,
X, y, r"%s$%.1e$" % (title, concentration),
plot_title=k == 0)
plt.show()
| bsd-3-clause |
matbra/bokeh | bokeh/tests/test_protocol.py | 42 | 3959 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh.protocol import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_panda_series(self):
s = pd.Series([1, 3, 5, 6, 8])
self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
def test_numpyarray(self):
a = np.arange(5)
self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
self.assertEqual(self.encoder.default(npint), 1)
self.assertIsInstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
self.assertEqual(self.encoder.default(npfloat), 1.33)
self.assertIsInstance(self.encoder.default(npfloat), float)
def test_numpybool_(self):
nptrue = np.bool_(True)
self.assertEqual(self.encoder.default(nptrue), True)
self.assertIsInstance(self.encoder.default(nptrue), bool)
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_pd_timestamp(self):
ts = pd.tslib.Timestamp('April 28, 1948')
self.assertEqual(self.encoder.default(ts), -684115200000)
class TestSerializeJson(unittest.TestCase):
def setUp(self):
from bokeh.protocol import serialize_json, deserialize_json
self.serialize = serialize_json
self.deserialize = deserialize_json
def test_with_basic(self):
self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
def test_with_np_array(self):
a = np.arange(5)
self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_with_pd_series(self):
s = pd.Series([0, 1, 2, 3, 4])
self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_nans_and_infs_pandas(self):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_datetime_types(self):
"""should convert to millis
"""
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
diana-hep/carl | tests/distributions/test_exponential.py | 1 | 1703 | # Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
import scipy.stats as st
from numpy.testing import assert_array_almost_equal
from sklearn.utils import check_random_state
from carl.distributions import Exponential
def check_exponential(inverse_scale):
rng = check_random_state(1)
p_carl = Exponential(inverse_scale=inverse_scale)
p_scipy = st.expon(scale=1. / inverse_scale)
X = rng.rand(50, 1)
assert_array_almost_equal(p_carl.pdf(X),
p_scipy.pdf(X.ravel()))
assert_array_almost_equal(p_carl.cdf(X),
p_scipy.cdf(X.ravel()))
assert_array_almost_equal(-np.log(p_carl.pdf(X)),
p_carl.nll(X))
def test_exponential():
for inverse_scale in [1, 2, 5]:
yield check_exponential, inverse_scale
def check_rvs(inverse_scale, random_state):
p = Exponential(inverse_scale=inverse_scale)
samples = p.rvs(1000, random_state=random_state)
assert np.abs(np.mean(samples) - 1. / inverse_scale) <= 0.05
def test_rvs():
for inverse_scale, random_state in [(1, 0), (1, 1),
(2, 3), (0.5, 4)]:
yield check_rvs, inverse_scale, random_state
def check_fit(inverse_scale):
p = Exponential()
X = st.expon(scale=1. / inverse_scale).rvs(5000,
random_state=0).reshape(-1, 1)
p.fit(X)
assert np.abs(p.inverse_scale.get_value() - inverse_scale) <= 0.1
def test_fit():
for inverse_scale in [1, 2, 5]:
yield check_fit, inverse_scale
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/tsa/filters/bk_filter.py | 4 | 3244 | from __future__ import absolute_import
import numpy as np
from scipy.signal import fftconvolve
from ._utils import _maybe_get_pandas_wrapper
def bkfilter(X, low=6, high=32, K=12):
"""
Baxter-King bandpass filter
Parameters
----------
X : array-like
A 1 or 2d ndarray. If 2d, variables are assumed to be in columns.
low : float
Minimum period for oscillations, i.e., Baxter and King suggest that
the Burns-Mitchell U.S. business cycle has 6 for quarterly data and
1.5 for annual data.
high : float
Maximum period for oscillations BK suggest that the U.S.
business cycle has 32 for quarterly data and 8 for annual data.
K : int
Lead-lag length of the filter. Baxter and King propose a truncation
length of 12 for quarterly data and 3 for annual data.
Returns
-------
Y : array
Cyclical component of X
References
---------- ::
Baxter, M. and R. G. King. "Measuring Business Cycles: Approximate
Band-Pass Filters for Economic Time Series." *Review of Economics and
Statistics*, 1999, 81(4), 575-593.
Notes
-----
Returns a centered weighted moving average of the original series, where
the weights a[j] are computed as ::
a[j] = b[j] + theta, for j = 0, +/-1, +/-2, ... +/- K
b[0] = (omega_2 - omega_1)/pi
b[j] = 1/(pi*j)*(sin(omega_2*j)-sin(omega_1*j)), for j = +/-1, +/-2,...
and theta is a normalizing constant ::
theta = -sum(b)/(2K+1)
Examples
--------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> index = pd.DatetimeIndex(start='1959Q1', end='2009Q4', freq='Q')
>>> dta.set_index(index, inplace=True)
>>> cycles = sm.tsa.filters.bkfilter(dta[['realinv']], 6, 24, 12)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> cycles.plot(ax=ax, style=['r--', 'b-'])
>>> plt.show()
.. plot:: plots/bkf_plot.py
See Also
--------
statsmodels.tsa.filters.cf_filter.cffilter
statsmodels.tsa.filters.hp_filter.hpfilter
statsmodels.tsa.seasonal.seasonal_decompose
"""
#TODO: change the docstring to ..math::?
#TODO: allow windowing functions to correct for Gibb's Phenomenon?
# adjust bweights (symmetrically) by below before demeaning
# Lancosz Sigma Factors np.sinc(2*j/(2.*K+1))
_pandas_wrapper = _maybe_get_pandas_wrapper(X, K, K)
X = np.asarray(X)
omega_1 = 2.*np.pi/high # convert periodicity to angular frequency
omega_2 = 2.*np.pi/low
bweights = np.zeros(2*K+1)
bweights[K] = (omega_2 - omega_1)/np.pi # weight at zero freq.
j = np.arange(1,int(K)+1)
weights = 1/(np.pi*j)*(np.sin(omega_2*j)-np.sin(omega_1*j))
bweights[K+j] = weights # j is an idx
bweights[:K] = weights[::-1] # make symmetric weights
bweights -= bweights.mean() # make sure weights sum to zero
if X.ndim == 2:
bweights = bweights[:,None]
X = fftconvolve(X, bweights, mode='valid') # get a centered moving avg/
# convolution
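# 'valid' mode drops the K leading and K trailing observations, so the
# filtered series is 2*K observations shorter than the input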
if _pandas_wrapper is not None:
return _pandas_wrapper(X)
return X
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/neighbors/__init__.py | 71 | 1025 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
from .lof import LocalOutlierFactor
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest',
'LocalOutlierFactor']
| bsd-3-clause |
pdebuyl/RMPCDMD | experiments/03-single-janus/plot_planar.py | 2 | 1110 | #!/usr/bin/env python
"""
Display the planar concentration and velocity fields of a RMPCDMD simulation.
"""
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('file', help="H5MD file")
parser.add_argument('--species', type=int, default=0)
args = parser.parse_args()
import h5py
import matplotlib.pyplot as plt
import numpy as np
with h5py.File(args.file, 'r') as f:
c = f['fields/planar_concentration']
v = f['fields/planar_velocity']
x_min = c.attrs['x_min'][()]
dx = c.attrs['dx'][()]
y_min = c.attrs['y_min'][()]
dy = c.attrs['dy'][()]
thickness = c.attrs['thickness'][()]
c = c[:]
v = v[:]
N_x, N_y = c.shape[:2]
# x and y must overshoot c.shape by one for pcolormesh
x = x_min + np.arange(N_x+1)*dx
y = y_min + np.arange(N_y+1)*dy
c /= dx*dy*thickness
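# convert cell counts into a number density per unit volume of the planar slab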
plt.subplot(121, aspect=1)
plt.pcolormesh(x, y, c[:,:,args.species].T, cmap=plt.cm.viridis)
plt.colorbar()
plt.subplot(122, aspect=1)
x, y = np.meshgrid(x[:-1], y[:-1])
plt.quiver(x, y, v[:,:,args.species,0].T, v[:,:,args.species,1].T)
plt.show()
| bsd-3-clause |
linksuccess/linksuccess | parsingframework/structural_statistics.py | 1 | 28185 | from __future__ import division
import numpy as np
from graph_tool.all import *
from matplotlib import pyplot as plt
import pickle
import powerlaw
import fitpowerlaw as fp
from wsd.database import MySQLDatabase
from graph_tool.all import *
from conf import *
from matplotlib import style
style.use('acm-2col-bmh')
import pylab
params = {
'font.family' : 'serif',
'font.serif' : ['Times New Roman'],
'font.size' : 7
}
pylab.rcParams.update(params)
def plot_stats():
# wikipedia graph structural statistics
print 'before load'
network = load_graph("output/wikipedianetwork.xml.gz")
print 'after load'
out_hist = vertex_hist(network, "out")
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.plot(out_hist[1][:-1], out_hist[0], marker='o')
plt.xlabel('Out-degree')
plt.ylabel('Frequency')
plt.gca().set_ylim([1, 10**6])
#plt.title('Out-degree Distribution')
plt.tight_layout()
plt.savefig('output/wikipedia-out-deg-dist.pdf')
plt.clf()
in_hist = vertex_hist(network, "in")
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.plot(in_hist[1][:-1], in_hist[0], marker='o')
plt.xlabel('In-degree')
plt.ylabel('Frequency')
plt.gca().set_ylim([1, 10**6])
#plt.title('In-degree Distribution')
plt.tight_layout()
plt.savefig('output/wikipedia-in-deg-dist.pdf')
plt.clf()
total_hist = vertex_hist(network, "total")
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.plot(total_hist[1][:-1], total_hist[0], marker='o')
plt.xlabel('Degree')
plt.ylabel('Frequency')
plt.gca().set_ylim([1, 10**6])
#plt.title('Degree Distribution')
plt.tight_layout()
plt.savefig('output/wikipedia-deg-dist.pdf')
plt.clf()
clust = network.vertex_properties["local_clust"]
#clust = local_clustering(network, undirected=False)
#hist, bin_edges = np.histogram(clust.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Local Clustering Coefficient C')
#plt.ylabel('P(x<=C)')
#plt.title('Clustering Coefficient Distribution')
#plt.savefig('output/wikipedia-clust-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(clust.get_array(), ax)
#ax.set_title('Clustering Coefficient Distribution')
ax.set_xlabel('Local Clustering Coefficient C')
ax.set_ylabel('P(x<=C)')
ax.set_ylim([0, 1])
fig.tight_layout()
fig.savefig('output/wikipedia-clust-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(clust.get_array(), ax)
#ax.set_title('Clustering Coefficient Distribution')
ax.set_xlabel('Local Clustering Coefficient C')
ax.set_ylabel('P(x>=C)')
ax.set_ylim([10**-4, 10**-0.5])
fig.tight_layout()
fig.savefig('output/wikipedia-clust-ccdf.pdf')
plt.clf()
prank = network.vertex_properties["page_rank"]
#hist, bin_edges = np.histogram(prank.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Page rank Pr')
#plt.ylabel('P(x<=Pr)')
#plt.title('Page rank Distribution')
#plt.savefig('output/wikipedia-prank-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(prank.get_array(), ax)
#ax.set_title('Page Rank Distribution')
ax.set_xlabel('Page rank Pr')
ax.set_ylabel('P(x<=Pr)')
ax.set_ylim([0, 1])
fig.tight_layout()
fig.savefig('output/wikipedia-prank-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(prank.get_array(), ax)
#ax.set_title('Page Rank Distribution')
ax.set_xlabel('Page rank Pr')
ax.set_ylabel('P(x>=Pr)')
fig.tight_layout()
fig.savefig('output/wikipedia-prank-ccdf.pdf')
plt.clf()
kcore = network.vertex_properties["kcore"]
#hist, bin_edges = np.histogram(kcore.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Kcore kC')
#plt.ylabel('P(x<=kC)')
#plt.title('K-Core Distribution')
#plt.savefig('output/wikipedia-kcore-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(kcore.get_array(), ax)
#ax.set_title('K-Core Distribution')
ax.set_xlabel('k-Core kC')
ax.set_ylabel('P(x<=kC)')
ax.set_ylim([0, 1])
fig.tight_layout()
fig.savefig('output/wikipedia-kcore-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(kcore.get_array(), ax)
#ax.set_title('K-Core Distribution')
ax.set_xlabel('k-Core kC')
ax.set_ylabel('P(x>=kC)')
fig.tight_layout()
fig.savefig('output/wikipedia-kcore-ccdf.pdf')
plt.clf()
eigenvector_centr = network.vertex_properties["eigenvector_centr"]
#hist, bin_edges = np.histogram(eigenvector_centr.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Eigenvector Centrality E')
#plt.ylabel('P(x<=E)')
#plt.title('Eigenvector Centrality Distribution')
#plt.savefig('output/wikipedia-eigenvcentr-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(eigenvector_centr.get_array(), ax)
#ax.set_title('Eigenvector Centrality E')
ax.set_xlabel('Eigenvector Centrality E')
ax.set_ylabel('P(x<=E)')
ax.set_ylim([0, 1])
fig.tight_layout()
fig.savefig('output/wikipedia-eigenvcentr-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(eigenvector_centr.get_array(), ax)
#ax.set_title('Eigenvector Centrality E')
ax.set_xlabel('Eigenvector Centrality E')
ax.set_ylabel('P(x>=E)')
fig.tight_layout()
fig.savefig('output/wikipedia-eigenvcentr-ccdf.pdf')
plt.clf()
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','page_rank', 'hub', 'authority', 'kcore']:
feature = network.vertex_properties[f]
powerlaw.plot_cdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X>=f)$')
ax.set_ylim([0, 1])
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-features-cdf.pdf')
plt.clf()
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','eigenvector_centr','page_rank', 'hub', 'authority', 'kcore']:
feature = network.vertex_properties[f]
powerlaw.plot_ccdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X>=f)$')
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-features-ccdf.pdf')
plt.clf()
# wikipedia transitions graph structural statistics
print 'before load'
network_transitions = load_graph("output/transitionsnetwork.xml.gz")
print 'after load'
out_hist = vertex_hist(network_transitions, "out")
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.plot(out_hist[1][:-1], out_hist[0], marker='o')
plt.xlabel('Out-degree')
plt.ylabel('Frequency')
plt.gca().set_ylim([1, 10**6])
#plt.title('Out-degree Distribution')
plt.savefig('output/wikipedia-transitions-out-deg-dist.pdf')
plt.clf()
in_hist = vertex_hist(network_transitions, "in")
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.plot(in_hist[1][:-1], in_hist[0], marker='o')
plt.xlabel('In-degree')
plt.ylabel('Frequency')
#plt.title('In-degree Distribution')
plt.gca().set_ylim([1, 10**6])
plt.savefig('output/wikipedia-transitions-in-deg-dist.pdf')
plt.clf()
total_hist = vertex_hist(network_transitions, "total")
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
plt.plot(total_hist[1][:-1], total_hist[0], marker='o')
plt.xlabel('Degree')
plt.ylabel('Frequency')
#plt.title('Degree Distribution')
plt.gca().set_ylim([1, 10**6])
plt.savefig('output/wikipedia-transitions-deg-dist.pdf')
plt.clf()
#clust = local_clustering(network_transitions, undirected=False)
clust = network_transitions.vertex_properties["local_clust"]
#hist, bin_edges = np.histogram(clust.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Local Clustering Coefficient C')
#plt.ylabel('P(x<=C)')
#plt.title('Clustering Coefficient Distribution')
#plt.savefig('output/wikipedia-transitions-clust-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(clust.get_array(), ax)
#ax.set_title('Clustering Coefficient Distribution')
ax.set_xlabel('Local Clustering Coefficient C')
ax.set_ylabel('P(x<=C)')
fig.savefig('output/wikipedia-transitions-clust-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(clust.get_array(), ax)
ax.set_title('Clustering Coefficient Distribution')
ax.set_xlabel('Local Clustering Coefficient C')
ax.set_ylabel('P(x>=C)')
fig.savefig('output/wikipedia-transitions-clust-ccdf.pdf')
plt.clf()
prank = network_transitions.vertex_properties["page_rank"]
#hist, bin_edges = np.histogram(prank.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Page rank Pr')
#plt.ylabel('P(x<=Pr)')
#plt.title('Page rank Distribution')
#plt.savefig('output/wikipedia-transitions-prank-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(prank.get_array(), ax)
#ax.set_title('Page Rank Distribution')
ax.set_xlabel('Page rank Pr')
ax.set_ylabel('P(x<=Pr)')
fig.savefig('output/wikipedia-transitions-prank-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(prank.get_array(), ax)
#ax.set_title('Page Rank Distribution')
ax.set_xlabel('Page rank Pr')
ax.set_ylabel('P(x>=Pr)')
fig.savefig('output/wikipedia-transitions-prank-ccdf.pdf')
plt.clf()
kcore = network_transitions.vertex_properties["kcore"]
#hist, bin_edges = np.histogram(kcore.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Kcore kC')
#plt.ylabel('P(x<=kC)')
#plt.title('K-Core Distribution')
#plt.savefig('output/wikipedia-transitions-kcore-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(kcore.get_array(), ax)
#ax.set_title('K-Core Distribution')
ax.set_xlabel('k-Core kC')
ax.set_ylabel('P(x<=kC)')
fig.savefig('output/wikipedia-transitions-kcore-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(kcore.get_array(), ax)
#ax.set_title('K-Core Distribution')
ax.set_xlabel('k-Core kC')
ax.set_ylabel('P(x>=kC)')
fig.savefig('output/wikipedia-transitions-kcore-ccdf.pdf')
plt.clf()
eigenvector_centr = network_transitions.vertex_properties["eigenvector_centr"]
#hist, bin_edges = np.histogram(eigenvector_centr.get_array(), 100, density=True)
#cdf = np.cumsum(hist)
#plt.plot(bin_edges[1:], cdf, marker='o')
#plt.xlabel('Eigenvector centrality E')
#plt.ylabel('P(x<=E)')
#plt.title('Eigenvector Centrality Distribution')
#plt.savefig('output/wikipedia-transitions-eigenvcentr-cdf.pdf')
fig, ax = plt.subplots()
powerlaw.plot_cdf(eigenvector_centr.get_array(), ax)
#ax.set_title('Eigenvector Centrality Distribution')
ax.set_xlabel('Eigenvector centrality E')
ax.set_ylabel('P(x<=E)')
fig.savefig('output/wikipedia-transitions-eigenvcentr-cdf.pdf')
plt.clf()
fig, ax = plt.subplots()
powerlaw.plot_ccdf(eigenvector_centr.get_array(), ax)
#ax.set_title('Eigenvector Centrality Distribution')
ax.set_xlabel('Eigenvector centrality E')
ax.set_ylabel('P(x>=E)')
fig.savefig('output/wikipedia-transitions-eigenvcentr-ccdf.pdf')
plt.clf()
print 'before hits'
#ee, authority, hub = hits(network_transitions)
#network_transitions.vertex_properties["authority"] = authority
#network_transitions.vertex_properties["hub"] = hub
#network_transitions.save("output/transitionsnetwork.xml.gz")
print 'after hits'
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','page_rank', 'hub', 'authority', 'kcore']:
feature = network_transitions.vertex_properties[f]
powerlaw.plot_cdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X<=f)$')
ax.set_ylim([0, 1])
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-transitions-features-cdf.pdf')
plt.clf()
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','page_rank', 'hub', 'authority', 'kcore']:
feature = network_transitions.vertex_properties[f]
powerlaw.plot_ccdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X>=f)$')
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-transitions-features-ccdf.pdf')
plt.clf()
def plot_degree():
# wikipedia graph structural statistics
print 'before load'
network = load_graph("output/wikipedianetwork.xml.gz")
print 'after load'
print 'before load'
network_transitions = load_graph("output/transitionsnetwork.xml.gz")
print 'after load'
out_hist = vertex_hist(network, "out")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='wikipedia', color='b')
out_hist = vertex_hist(network_transitions, "out")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='transitions', color='r')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':4}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlabel('Out-degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-outdegree.pdf')
out_hist = vertex_hist(network, "in")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='wikipedia', color='b')
out_hist = vertex_hist(network_transitions, "in")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='transitions', color='r')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':4}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlabel('In-degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-indegree.pdf')
out_hist = vertex_hist(network, "total")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='wikipedia', color='b')
out_hist = vertex_hist(network_transitions, "total")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='transitions', color='r')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':4}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlabel('Degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-degree.pdf')
def plot_degree_filtered():
# wikipedia graph structural statistics
print 'before load'
network = load_graph("output/wikipedianetwork.xml.gz")
print 'after load'
print 'before load'
network_transitions = load_graph("output/transitionsnetwork.xml.gz")
print 'after load'
network = GraphView(network, vfilt=filter_transitions(network,network_transitions))
print 'filter out'
out_hist = vertex_hist(network, "out")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='wikipedia', color='b')
out_hist = vertex_hist(network_transitions, "out")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='transitions', color='r')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':4}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlim([10**0, 10**6])
ax.set_xlabel('Out-degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-outdegree-filtered.pdf')
out_hist = vertex_hist(network, "in")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='wikipedia', color='b')
out_hist = vertex_hist(network_transitions, "in")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='transitions', color='r')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':4}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlim([10**0, 10**6])
ax.set_xlabel('In-degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-indegree-filtered.pdf')
out_hist = vertex_hist(network, "total")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='wikipedia', color='b')
out_hist = vertex_hist(network_transitions, "total")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, label='transitions', color='r')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':4}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlim([10**0, 10**6])
ax.set_xlabel('Degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-degree-filtered.pdf')
print 'fit'
fp.powerLawExponent('transitions', network_transitions)
fp.powerLawExponent('filtered_network', network)
def filter_transitions(network1, network2):
filter = network1.new_vertex_property('bool')
print 'filter'
found_false = False
for i,v in enumerate(network1.vertices()):
if i% 100000==0:
print i
if v in network2.vertices():
#print "true"
filter[v]=True
else:
if not found_false:
print "false"
found_false=True
filter[v]=False
return filter
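# A minimal alternative sketch (hypothetical helper, not used elsewhere in this script):
# the membership test `v in network2.vertices()` above scans all vertices of network2 for
# every vertex of network1. Collecting the vertex indices of network2 into a set first keeps
# the filter construction roughly linear. This assumes, as filter_transitions already does,
# that both graphs use the same integer vertex ids for the same articles.
def filter_transitions_fast(network1, network2):
    present = set(int(v) for v in network2.vertices())  # one pass over network2
    vfilt = network1.new_vertex_property('bool')
    for v in network1.vertices():
        vfilt[v] = int(v) in present
    return vfilt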
def plot_features():
print 'before load'
network = load_graph("output/wikipedianetwork.xml.gz")
print 'after load'
print 'before load'
network_transitions = load_graph("output/transitionsnetwork.xml.gz")
print 'after load'
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','page_rank', 'hub', 'authority', 'kcore']:
feature = network_transitions.vertex_properties[f]
powerlaw.plot_cdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X<=f)$')
ax.set_ylim([0, 1])
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-transitions-features-cdf.pdf')
plt.clf()
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','page_rank', 'hub', 'authority', 'kcore']:
feature = network_transitions.vertex_properties[f]
powerlaw.plot_ccdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X>=f)$')
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-transitions-features-ccdf.pdf')
plt.clf()
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','page_rank', 'hub', 'authority', 'kcore']:
feature = network.vertex_properties[f]
powerlaw.plot_cdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X<=f)$')
ax.set_ylim([0, 1])
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-features-cdf.pdf')
plt.clf()
colors= {'local_clust':'r','eigenvector_centr':'b', 'page_rank': 'g', 'kcore':'m', 'hub': 'c', 'authority':'k'}
labels = {'local_clust': 'clust.', 'eigenvector_centr':'eigen. centr.','page_rank': 'page rank', 'kcore': 'kcore', 'hub':'hub', 'authority':'authority'}
fig = plt.figure()
ax = fig.add_subplot(111)
for f in ['local_clust','eigenvector_centr','page_rank', 'hub', 'authority', 'kcore']:
feature = network.vertex_properties[f]
powerlaw.plot_ccdf(feature.get_array(), ax, label=labels[f],color=colors[f])
ax.set_xlabel('Feature $f$')
ax.set_ylabel('$P(X>=f)$')
plt.legend(fancybox=True, loc=3, ncol=2, prop={'size':4})
plt.tight_layout()
plt.savefig('output/wikipedia-features-ccdf.pdf')
def plot_degree_filtered_sql():
print 'before select'
db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
conn = db._create_connection()
cursor = conn.cursor()
cursor.execute('SELECT source_article_id, target_article_id FROM link_occurences where source_article_id in '
' (select distinct prev_id from clickstream_derived_internal_links);')
result = cursor.fetchall()
network = Graph()
print 'after select'
print 'result len'
print len(result)
for i, link in enumerate(result):
if i % 1000000==0:
print i, len(result)
network.add_edge(link[0], link[1])
# filter all nodes that have no edges
print 'filter nodes with degree zero graph tool specific code'
network = GraphView(network, vfilt=lambda v : v.out_degree()+v.in_degree()>0 )
print 'before save'
network.save("output/wikipedianetworkfilteredwithtransitions_prev_id.xml.gz")
print 'done'
cursor.execute('SELECT source_article_id, target_article_id FROM link_occurences where target_article_id in '
' (select distinct curr_id from clickstream_derived_internal_links);')
result = cursor.fetchall()
network = Graph()
print 'after select'
print 'result len'
print len(result)
for i, link in enumerate(result):
if i % 1000000==0:
print i, len(result)
network.add_edge(link[0], link[1])
# filter all nodes that have no edges
print 'filter nodes with degree zero graph tool specific code'
network = GraphView(network, vfilt=lambda v : v.out_degree()+v.in_degree()>0 )
print 'before save'
network.save("output/wikipedianetworkfilteredwithtransitions_curr_id.xml.gz")
print 'done'
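# A minimal bulk-loading sketch (hypothetical helper, assumes graph_tool's Graph.add_edge_list
# is available, as in recent graph_tool releases): feeding the whole result set at once avoids
# the per-row Python overhead of calling add_edge inside the loops above.
def build_network_from_pairs(pairs):
    from graph_tool.all import Graph, GraphView
    g = Graph(directed=True)
    g.add_edge_list(pairs)  # vertices are created on demand from the integer article ids
    # keep only vertices with at least one edge, mirroring the GraphView filter used above
    return GraphView(g, vfilt=lambda v: v.out_degree() + v.in_degree() > 0)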
def plot_degree_filtered_with_transitions(network_name):
# wikipedia graph structural statistics
print 'before load'
network = load_graph("output/wikipedianetworkfilteredwithtransitions_"+network_name+".xml.gz")
print 'after load'
print 'before load'
network_transitions = load_graph("output/transitionsnetwork.xml.gz")
print 'after load'
out_hist = vertex_hist(network, "out")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, markeredgecolor='none', label=r'$D_{wiki}$')
out_hist = vertex_hist(network_transitions, "out")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, markeredgecolor='none', label=r'$D_{trans}$')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':5})
#plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':5}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlim([10**0, 10**4])
ax.set_xlabel('Out-degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-outdegree-filtered'+network_name+'-sql.pdf', bbox_inches='tight')
out_hist = vertex_hist(network, "in")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, markeredgecolor='none', label=r'$D_{wiki}$')
out_hist = vertex_hist(network_transitions, "in")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, markeredgecolor='none', label=r'$D_{trans}$')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':5})
#plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':5}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlim([10**0, 10**6])
ax.set_xlabel('In-degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-indegree-filtered'+network_name+'-sql.pdf', bbox_inches='tight')
out_hist = vertex_hist(network, "total")
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.set_xscale('log')
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, markeredgecolor='none', label='wikipedia')
out_hist = vertex_hist(network_transitions, "total")
ax.plot(out_hist[1][:-1], out_hist[0], marker='o', markersize=3, markeredgecolor='none', label='transitions')
plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':5})
#plt.legend(fancybox=True, loc='upper right', ncol=1, prop={'size':5}, numpoints=1, handlelength=0)
ax.set_ylim([10**0, 10**6])
ax.set_xlim([10**0, 10**6])
ax.set_xlabel('Degree')
ax.set_ylabel('Frequency')
fig.tight_layout()
fig.savefig('output/wikipedia-transitions-degree-filtered'+network_name+'-sql.pdf', bbox_inches='tight')
print 'fit'+network_name
fp.powerLawExponent('transitions', network_transitions)
fp.powerLawExponent(network_name, network)
if __name__ == '__main__':
#plot_degree_filtered_sql()
plot_degree_filtered_with_transitions('prev_id')
#plot_degree_filtered_with_transitions('curr_id')
#plot_degree_filtered()
#plot_degree()
#plot_features()
| mit |
MartinSavc/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
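Examples
--------
A minimal sketch (illustrative): NFKD decomposition splits off the combining
accents, which are then dropped.
>>> from sklearn.feature_extraction.text import strip_accents_unicode
>>> strip_accents_unicode(u'\xe9\xe8') == u'ee'
True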
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
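Examples
--------
A minimal sketch (illustrative): each tag is replaced by a single space.
>>> from sklearn.feature_extraction.text import strip_tags
>>> strip_tags("<b>bold</b>") == ' bold '
True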
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
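# Worked example (illustrative): with ngram_range=(2, 3) the single word "cat" is padded
# to " cat " and yields " c", "ca", "at", "t ", " ca", "cat", "at ".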
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
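Examples
--------
A minimal usage sketch (illustrative; the output shape follows directly from
``n_features``, and no vocabulary is stored):
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> texts = ["the cat sat on the mat", "the dog sat on the log"]
>>> vectorizer = HashingVectorizer(n_features=2 ** 10)
>>> X = vectorizer.transform(texts)
>>> X.shape
(2, 1024)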
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
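Examples
--------
A minimal usage sketch (illustrative; the two toy documents contain six
distinct tokens, hence the shape of the document-term matrix):
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = ["the cat sat on the mat", "the dog sat"]
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> X.shape
(2, 6)
>>> len(vectorizer.vocabulary_)
6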
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
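Concretely, with ``smooth_idf=True`` (the default) the idf of a term t that
occurs in df(t) of the n documents is computed as
idf(t) = ln((1 + n) / (1 + df(t))) + 1, and as idf(t) = ln(n / df(t)) + 1
without smoothing; this matches the computation performed in ``fit`` below.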
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
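Examples
--------
A minimal usage sketch (illustrative): fit_transform learns the vocabulary and
the idf vector and returns the l2-normalized tf-idf matrix.
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = ["the cat sat on the mat", "the dog sat"]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> X.shape
(2, 6)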
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
murali-munna/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
    list_n_samples = np.linspace(100, 10000, 5).astype(int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
yerkesobservatory/seo | modules/resource/server.py | 1 | 4061 | import io
import flask
import base64
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from typing import Dict
from routines import plots
from config import config
import logging
import colorlog
#from flask_cors import CORS
class ResourceServer(object):
""" This class is a REST endpoint designed to serve custom files (primary plots and images)
for the Meteor web app.
"""
# logger for class
log = None
def __init__(self):
""" We create the app, register routes, and runz.
"""
# initialize logging system if not already done
if not ResourceServer.log:
ResourceServer.__init_log()
# create the Flask app
app = flask.Flask("Resource Server")
# make it CORS compatible
# CORS(app)
@app.route('/visibility/<string:target>', methods=['GET'])
def visibility(target: str, **kwargs) -> Dict[str, str]:
return self.visibility(target, **kwargs)
@app.route('/preview/<string:target>', methods=['GET'])
def preview(target: str, **kwargs) -> Dict[str, str]:
return self.preview(target, **kwargs)
# start it
app.run(host='0.0.0.0', port=config.queue.resource_port)
def make_plot_response(self, figure: matplotlib.figure.Figure, **kwargs):
""" Given a matplotlib figure, base64 encode the figure and
make the appropriate HTML response.
"""
# create bytes object to store image
img = io.BytesIO()
# save the figure into bytes
self.log.debug('make_plot_response')
figure.savefig(img, format='png', bbox_inches='tight', **kwargs)
figure.savefig('/tmp/test.png', format='png',
bbox_inches='tight', **kwargs)
img.seek(0)
# construct HTML response from image
response = flask.make_response(
base64.b64encode(img.getvalue()).decode())
response.headers['Content-Type'] = 'image/png'
response.headers['Content-Transfer-Encoding'] = 'BASE64'
# support CORS
response.headers['Access-Control-Allow-Origin'] = (
flask.request.headers.get('ORIGIN') or 'https://queue.stoneedgeobservatory.com')
# close image and figures
img.close()
return response
def visibility(self, target: str) -> Dict[str, str]:
""" This endpoint produces a visibility curve (using code in /routines)
for the object provided by 'target', and returns it to the requester.
"""
self.log.info('visibility called!')
fig = plots.visibility_curve(target, figsize=(8, 4))
if fig:
response = self.make_plot_response(fig, transparent=False)
plt.close(fig)
return response
return flask.Response("{'error': 'Unable to create visibility plot'}", status=500, mimetype='application/json')
def preview(self, target: str) -> Dict[str, str]:
""" This endpoint uses astroplan to produce a preview image
"""
fig = plots.target_preview(target)
if fig:
response = self.make_plot_response(fig, transparent=True)
plt.close(fig)
return response
return flask.Response("{'error': 'Unable to create target preview'}", status=500, mimetype='application/json')
@classmethod
def __init_log(cls) -> bool:
""" Initialize the logging system for this module and set
a ColoredFormatter.
"""
# create format string for this module
format_str = config.logging.fmt.replace('[name]', 'RESOURCE')
formatter = colorlog.ColoredFormatter(
format_str, datefmt=config.logging.datefmt)
# create stream
stream = logging.StreamHandler()
stream.setLevel(logging.DEBUG)
stream.setFormatter(formatter)
# assign log method and set handler
cls.log = logging.getLogger('resource')
cls.log.setLevel(logging.DEBUG)
cls.log.addHandler(stream)
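# --- Illustrative entry point (editor's addition; a sketch, assuming the module is launched directly) ---
# Instantiating ResourceServer registers the routes and starts the blocking Flask server.
if __name__ == '__main__':
    ResourceServer()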
| gpl-3.0 |
cinepost/Copperfield_FX | copper/cop/cop_file.py | 1 | 10021 | import matplotlib.image
from PIL import Image
import pyopencl as cl
import numpy
import os
import logging
from copper.core.op.node_type import NodeTypeBase
from copper.core.op.node_type_category import Cop2NodeTypeCategory
from copper.core.op.op_data_socket import OP_DataSocket
from copper.core.data.image_data import ImageData
from copper.cop.cop_node import CopNode
from copper.core.parameter.parm_template import *
logger = logging.getLogger(__name__)
class COP2_File(CopNode):
class NodeType(NodeTypeBase):
icon_name = 'COP2_file'
type_name = 'file'
category = Cop2NodeTypeCategory
def __init__(self, engine, parent):
super(COP2_File, self).__init__(engine, parent)
self._output_sockets = (
OP_DataSocket(self, "output1", ImageData),
)
self.program = self.engine.load_program("source_image.cl")
def parmTemplates(self):
templates = super(COP2_File, self).parmTemplates()
templates += [
StringParmTemplate(name="filename", label="File", default_value=("copper/media/default.png",), string_type=StringParmType.FileReference),
MenuParmTemplate(name='overridesize', label='File Size', menu_items=('natural', 'project', 'size'), menu_labels=('Natural Resolution',
'Project Resolution', 'Specific Resolution'), default_value=0),
IntParmTemplate(name="size", label="Size", length=2, default_value=(512,512), naming_scheme=ParmNamingScheme.Base1),
ToggleParmTemplate(name="flipy", label="Flip Image", default_value=False),
IntParmTemplate(name='startframe', label='Shift to Start Frame', length=1, naming_scheme=ParmNamingScheme.Base1, default_value=(1,)),
IntParmTemplate(name='start', label='File Range Start', length=1, naming_scheme=ParmNamingScheme.Base1, default_value=(1,)),
MenuParmTemplate(name='missingfr', label='Missing Frames', menu_items=('closest', 'previous', 'next', 'black', 'error'), menu_labels=('Use Closest Frame',
'Use Previous Frame', 'Use Next Frame', 'Use Black Frame', 'Report Error'), default_value=0),
]
return templates
@classmethod
def label(cls):
return "File"
def xRes(self):
if self.overridesize:
return self.parm("size1").eval()
self.cook()
return self.image_width
def yRes(self):
if self.overridesize:
return self.parm("size2").eval()
self.cook()
return self.image_height
def imageBounds(self):
return (0, 0, self.xRes(), self.yRes())
@property
def overridesize(self):
if self.parm("overridesize").evalAsString() != "natural":
return True
return False
def loadJPG(self, filename, cl_context):
logger.debug("Loading jpg image %s" % filename)
img = Image.open(filename).convert("RGBA")
im = numpy.asarray(img)
self.source_width = im.shape[1]
self.source_height = im.shape[0]
if self.parm("size1").eval() != 0 and self.overridesize:
self.image_width = self.parm("size1").eval()
else:
self.image_width = self.source_width
if self.parm("size2").eval() != 0 and self.overridesize:
self.image_height = self.parm("size2").eval()
else:
self.image_height = self.source_height
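        # Split the decoded RGBA image into per-channel byte planes; each plane is uploaded
        # below as a single-channel OpenCL image.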
r = numpy.array(im[:,:,0],dtype=numpy.uint8)
g = numpy.array(im[:,:,1],dtype=numpy.uint8)
b = numpy.array(im[:,:,2],dtype=numpy.uint8)
a = numpy.array(im[:,:,3],dtype=numpy.uint8)
self.devInBufferR = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.R, cl.channel_type.UNORM_INT8),
shape=(self.source_width, self.source_height,), pitches=(self.source_width,), hostbuf=r)
#self.devInBufferR = cl.image_from_array(cl_context, r, num_channels=1)
#self.devInBufferG = cl.image_from_array(cl_context, g, num_channels=1)
#self.devInBufferB = cl.image_from_array(cl_context, b, num_channels=1)
#self.devInBufferA = cl.image_from_array(cl_context, a, num_channels=1)
self.devInBufferG = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.R, cl.channel_type.UNORM_INT8),
shape=(self.source_width, self.source_height,), pitches=(self.source_width,), hostbuf=g)
self.devInBufferB = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.R, cl.channel_type.UNORM_INT8),
shape=(self.source_width, self.source_height,), pitches=(self.source_width,), hostbuf=b)
self.devInBufferA = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.R, cl.channel_type.UNORM_INT8),
shape=(self.source_width, self.source_height,), pitches=(self.source_width,), hostbuf=a)
logger.debug("Jpg image %s loaded" % filename)
def loadEXR(self, filename, cl_context):
import OpenEXR
import Imath
pt = Imath.PixelType(Imath.PixelType.HALF)
image = OpenEXR.InputFile(filename)
header = image.header()
dw = header['dataWindow']
channels = header['channels']
size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
self.source_width = size[0]
self.source_height = size[1]
if self.parm("size1").eval() != 0 and self.overridesize:
self.image_width = self.parm("size1").eval()
else:
self.image_width = self.source_width
if self.parm("size2").eval() != 0 and self.overridesize:
self.image_height = self.parm("size2").eval()
else:
self.image_height = self.source_height
redstr = image.channel('R', pt)
host_buff_r = numpy.fromstring(redstr, dtype = numpy.float16)
host_buff_r.shape = (size[1], size[0]) # Numpy arrays are (row, col)
self.devInBufferR = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.INTENSITY, cl.channel_type.HALF_FLOAT), shape=(self.source_width, self.source_height,), pitches=(self.source_width * 2,), hostbuf=host_buff_r)
greenstr = image.channel('G', pt)
host_buff_g = numpy.fromstring(greenstr, dtype = numpy.float16)
host_buff_g.shape = (size[1], size[0]) # Numpy arrays are (row, col)
self.devInBufferG = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.INTENSITY, cl.channel_type.HALF_FLOAT), shape=(self.source_width, self.source_height,), pitches=(self.source_width * 2,), hostbuf=host_buff_g)
bluestr = image.channel('B', pt)
host_buff_b = numpy.fromstring(bluestr, dtype = numpy.float16)
host_buff_b.shape = (size[1], size[0]) # Numpy arrays are (row, col)
self.devInBufferB = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.INTENSITY, cl.channel_type.HALF_FLOAT), shape=(self.source_width, self.source_height,), pitches=(self.source_width * 2,), hostbuf=host_buff_b)
if(channels.get('A') is not None):
alphastr = image.channel('A', pt)
host_buff_a = numpy.fromstring(alphastr, dtype = numpy.float16)
host_buff_a.shape = (size[1], size[0]) # Numpy arrays are (row, col)
self.devInBufferA = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.INTENSITY, cl.channel_type.HALF_FLOAT), shape=(self.source_width, self.source_height,), pitches=(self.source_width * 2,), hostbuf=host_buff_a)
else:
self.devInBufferA = cl.Image(cl_context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, cl.ImageFormat(cl.channel_order.INTENSITY, cl.channel_type.HALF_FLOAT), shape=(self.source_width, self.source_height,), pitches=(self.source_width * 2,), hostbuf=numpy.ones(self.source_width * self.source_height, dtype = numpy.float16))
def getImageFileName(self):
filename = self.parm("filename").evalAsString()
return filename
#image_frame = self.engine.frame() + self.parm("start").evalAsInt() - self.parm("startframe").evalAsInt()
#return filename.expandedString(context={"frame": image_frame})
def compute(self, lock, cl_context, cl_queue):
super(COP2_File, self).compute()
imagefile = self.getImageFileName()
self.image_width = self.parm("size1").eval()
self.image_height = self.parm("size2").eval()
if os.path.isfile(imagefile):
ext = imagefile.rsplit(".")[-1]
if ext.lower() in ["jpg", "jpeg", "png"]:
self.loadJPG(imagefile, cl_context)
logger.debug("Creating compute image for %s image" % ext)
self.devOutBuffer = cl.Image(cl_context, cl.mem_flags.READ_WRITE, self.image_format, shape=(self.image_width, self.image_height))
exec_evt = self.program.run_jpg(cl_queue, (self.image_width, self.image_height), None,
self.devInBufferR, # red channel buffer
self.devInBufferG, # green channel buffer
self.devInBufferB, # blue channel buffer
self.devInBufferA, # alpha channel buffer
self.devOutBuffer,
numpy.int32(self.source_width),
numpy.int32(self.source_height),
numpy.int32(self.image_width),
numpy.int32(self.image_height)
)
exec_evt.wait()
elif ext.lower() == "exr":
self.loadEXR(imagefile, cl_context)
logger.debug("Creating compute image for %s image" % ext)
self.devOutBuffer = cl.Image(cl_context, cl.mem_flags.READ_WRITE, self.image_format, shape=(self.image_width, self.image_height))
exec_evt = self.program.run_exr(cl_queue, (self.image_width, self.image_height), None,
self.devInBufferR, # red channel buffer
self.devInBufferG, # green channel buffer
self.devInBufferB, # blue channel buffer
self.devInBufferA, # alpha channel buffer
self.devOutBuffer,
numpy.int32(self.source_width),
numpy.int32(self.source_height),
numpy.int32(self.image_width),
numpy.int32(self.image_height),
)
exec_evt.wait()
else:
if self.parm("missingfr").eval() is 4:
raise BaseException("Image file %s does not exist !!!" % imagefile)
else:
logging.warning("Image file %s does not found !!! Using BLACK frame instead." % imagefile)
self.devOutBuffer = cl.Image(cl_context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, self.image_format, shape=(self.image_width, self.image_height), hostbuf=numpy.zeros(self.image_width * self.image_height * 4, dtype = numpy.float32))
| unlicense |
mortada/scipy | scipy/integrate/quadrature.py | 25 | 27849 | from __future__ import division, print_function, absolute_import
__all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb',
'cumtrapz','newton_cotes']
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from numpy import sum, ones, add, diff, isinf, isscalar, \
asarray, real, trapz, arange, empty
import numpy as np
import math
import warnings
from scipy._lib.six import xrange
class AccuracyWarning(Warning):
pass
def _cached_p_roots(n):
"""
    Cache p_roots results to speed up repeated calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func,a,b,args=(),n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
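    Examples
    --------
    A minimal sketch; 5-point Gauss-Legendre quadrature is exact for
    polynomials up to degree 9, so the analytical value 1/9 is recovered.
    >>> from scipy import integrate
    >>> val, _ = integrate.fixed_quad(lambda x: x**8, 0.0, 1.0, n=5)
    >>> abs(val - 1.0/9.0) < 1e-10
    True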
"""
[x,w] = _cached_p_roots(n)
x = real(x)
ainf, binf = map(isinf,(a,b))
if ainf or binf:
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0*sum(w*func(y,*args),0), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if isscalar(x):
return func(x, *args)
x = asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
if hasattr(y0, 'dtype'):
output = empty((n,), dtype=y0.dtype)
else:
output = empty((n,), dtype=type(y0))
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
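    Examples
    --------
    A minimal sketch; the analytical value of the integral is 1/3.
    >>> from scipy import integrate
    >>> val, err = integrate.quadrature(lambda x: x**2, 0.0, 1.0)
    >>> abs(val - 1.0/3.0) < 1e-7
    True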
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : int, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = asarray(y)
if x is None:
d = dx
else:
x = asarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = add.accumulate(d * (y[slice1] + y[slice2]) / 2.0, axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y,start,stop,x,dx,axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
all = (slice(None),)*nd
slice0 = tupleset(all, axis, slice(start, stop, step))
slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(all, axis, slice(start+2, stop+2, step))
    if x is None: # Evenly spaced Simpson's rule.
result = add.reduce(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = diff(x,axis=axis)
sl0 = tupleset(all, axis, slice(start, stop, step))
sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1)),axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
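    Examples
    --------
    A minimal sketch; with five equally spaced samples the rule is exact
    for cubics, so integrating x**3 over [0, 1] recovers 1/4.
    >>> import numpy as np
    >>> from scipy import integrate
    >>> x = np.linspace(0, 1, 5)
    >>> abs(integrate.simps(x**3, x) - 0.25) < 1e-12
    True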
"""
y = asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = asarray(x)
if len(x.shape) == 1:
shapex = ones(nd)
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be 'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y,0,N-3,x,dx,axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y,1,N-2,x,dx,axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y,0,N-2,x,dx,axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
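    Examples
    --------
    A minimal sketch using ``2**4 + 1`` equally spaced samples of ``x**2``;
    the analytical value of the integral is 1/3.
    >>> import numpy as np
    >>> from scipy import integrate
    >>> x = np.linspace(0, 1, 17)
    >>> abs(integrate.romb(x**2, dx=x[1] - x[0]) - 1.0/3.0) < 1e-12
    True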
"""
y = asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
all = (slice(None),) * nd
slice0 = tupleset(all, axis, 0)
slicem1 = tupleset(all, axis, -1)
h = Ninterv*asarray(dx)*1.0
R[(0,0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = all
start = stop = step = Ninterv
for i in range(1,k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start,stop,step))
step >>= 1
R[(i,0)] = 0.5*(R[(i-1,0)] + h*add.reduce(y[slice_R],axis))
for j in range(1,i+1):
R[(i,j)] = R[(i,j-1)] + \
(R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*j))-1)
h = h / 2.0
if show:
if not isscalar(R[(0,0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%" + str(width) + '.' + str(precis)+'f'
print("\n Richardson Extrapolation Table for Romberg Integration ")
print("====================================================================")
for i in range(0,k+1):
for j in range(0,i+1):
print(formstr % R[(i,j)], end=' ')
print()
print("====================================================================\n")
return R[(k,k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * arange(0, numtosum)
s = sum(function(points),0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if isinf(a) or isinf(b):
raise ValueError("Romberg integration only available for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a,b]
intrange = b-a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
for i in xrange(1, divmax+1):
n = n * 2
ordsum = ordsum + _difftrap(vfunc, interval, n)
resmat.append([])
resmat[i].append(intrange * ordsum / n)
for k in range(i):
resmat[i].append(_romberg_diff(resmat[i-1][k], resmat[i][k], k+1))
result = resmat[i][i]
lastresult = resmat[i-1][i-1]
err = abs(result - lastresult)
if err < tol or err < rtol*abs(result):
break
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1:(1,2,[1,1],-1,12),
2:(1,3,[1,4,1],-1,90),
3:(3,8,[1,3,3,1],-3,80),
4:(2,45,[7,32,12,32,7],-8,945),
5:(5,288,[19,75,50,50,75,19],-275,12096),
6:(1,140,[41,216,27,272,27,216,41],-9,1400),
7:(7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8:(4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9:(9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10:(5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11:(11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12:(1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13:(13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14:(7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]` and :math:`\\Delta x = \\frac{x_N-x_0}{N}`
    is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
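    Examples
    --------
    A minimal sketch: the order-2 coefficients reproduce Simpson's rule,
    i.e. ``dx/3 * (f(x_0) + 4*f(x_1) + f(x_2))``.
    >>> import numpy as np
    >>> from scipy.integrate import newton_cotes
    >>> an, B = newton_cotes(2)
    >>> np.allclose(an, [1/3., 4/3., 1/3.])
    True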
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
return na*np.array(vi,float)/da, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2.0*yi - 1
nvec = np.arange(0,N+1)
C = ti**nvec[:,np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = np.dot(Cinv[:,::2],vec) * N/2
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
| bsd-3-clause |
RealTimeWeb/datasets | preprocess/energy/process.py | 1 | 3434 | import pandas as pd
import json
import states
from tqdm import tqdm
df = pd.read_csv('Complete_SEDS.csv')
codes = df['MSN'].unique()
sources = [c[:2] for c in codes]
uses = [c[2:4] for c in codes]
purposes = [c[4] for c in codes]
#print(list(sorted(set(sources))))
#print(list(sorted(set(uses))))
#print(list(sorted(set(purposes))))
#print(df['MSN'].str[:2].value_counts())
#states = df['StateCode'].value_counts()
#print(len(states))
sources = {
'CL': 'Coal',
#'EL': 'Electricity',
'NG': 'Natural Gas',
'NU': 'Nuclear',
#'PA': 'Petroleum',
#'WD': 'Wood',
#'WW': 'Biomass Waste',
'DF': 'Distillate Fuel Oil',
'LG': 'Liquefied Petroleum Gases',
#'RF': 'Residual Fuel Oil',
#'ES': 'Electricity Sales',
#'MG': 'Motor Gasoline',
'KS': 'Kerosene',
#'PC': 'Petroleum Coke',
#'LU': 'Lubricants',
#'PE': 'Primary Energy',
#'AR': 'Asphalt and Road Oil',
'PO': 'Other Petroleum Products',
#'JF': 'Jet Fuel',
#'AV': 'Aviation Gasoline',
'WY': 'Wind',
'HY': 'Hydropower',
'SO': 'Solar',
'WD': 'Wood',
'GE': 'Geothermal'
}
sectors = {
'Consumption': {
'Transportation': 'ACB',
'Commercial': 'CCB',
'Electric Power': 'EIB',
'Industrial': 'ICB',
'Residential': 'RCB',
'Refinery': 'RFB'
},
'Price': {
'Transportation': 'ACD',
'Commercial': 'CCD',
'Electric Power': 'EID',
'Industrial': 'ICD'
},
'Production': {
'Production': 'PRB',
#'Marketed Production': 'MPB'
},
'Expenditure': {
'Transportation': 'ACV',
'Commercial': 'CCV',
'Electric Power': 'EIV',
'Industrial': 'ICV',
'Residential': 'RCV'
}
}
skip_tags = []
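# Pre-scan: record every (source, sector) MSN code whose values sum to zero across
# the whole file so that build_data_file() can skip it later.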
for major, major_group in tqdm(sectors.items()):
for minor, minor_code in major_group.items():
for source_code, source_name in sources.items():
full_code = source_code+minor_code
if df[df['MSN'] == full_code]['Data'].sum() == 0:
skip_tags.append(full_code)
print(skip_tags)
df.set_index(['MSN', 'StateCode', 'Year'], inplace=True)
del df['Data_Status']
print(df.head())
def build_data_file():
results = []
for year in tqdm(range(1960, 2015)):
for state_name, state_code in states.full_to_code.items():
row = {
'Year': year,
'State': state_name
}
for major, major_group in sectors.items():
for minor, minor_code in major_group.items():
for source_code, source_name in sources.items():
full_code = source_code+minor_code
if full_code in skip_tags:
continue
id = (full_code, state_code, year)
try:
value = df.loc[id, 'Data']
except KeyError:
value = 0
if major not in row:
row[major] = {}
if minor not in row[major]:
row[major][minor] = {}
row[major][minor][source_name] = value
row['Production'] = row['Production']['Production']
results.append(row)
with open('energy.json', 'w') as out:
json.dump(results, out)
| gpl-2.0 |
e-mission/e-mission-server | bin/analysis/get_app_analytics.py | 3 | 4793 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.get_database as edb
import emission.storage.timeseries.abstract_timeseries as esta
import pandas as pd
import matplotlib as mpl
mpl.use('AGG')
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as mdates
def get_app_analytics():
df = pd.DataFrame()
for user in edb.get_uuid_db().find():
user_df = esta.TimeSeries.get_time_series(user['uuid']).get_data_df("stats/server_api_time", time_query=None)
if not user_df.empty:
df = df.append(user_df, ignore_index = True)
df['datetime'] = df.ts.apply(lambda ts: dt.datetime.fromtimestamp(ts))
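    # Cap response-time readings at 1 second so a few slow requests do not dominate the plots.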
    df.loc[df.reading > 1, 'reading'] = 1
fig, ax = plt.subplots()
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%m/%d/%Y"))
plt.ylabel('Response time')
dashboard_df = df[df.name == "POST_/result/metrics/timestamp"]
dashboard_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Dashboard')
fig.savefig('Dashboard.png')
plt.close(fig)
fig, ax = plt.subplots()
cache_put_df = df[df.name == "POST_/usercache/put"]
cache_put_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Usercache_put')
fig.savefig('Usercache_put.png')
plt.close(fig)
fig, ax = plt.subplots()
cache_get_df = df[df.name == "POST_/usercache/get"]
cache_get_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Usercache_get')
fig.savefig('Usercache_get.png')
plt.close(fig)
fig, ax = plt.subplots()
stats_set_df = df[df.name == "POST_/stats/set"]
stats_set_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Stats_set')
fig.savefig('Stats_set.png')
plt.close(fig)
fig, ax = plt.subplots()
habitica_intro_df = df[df.name == "POST_/habiticaRegister"]
habitica_intro_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Habitica Sign up and Login')
fig.savefig('Habitica Sign up_Login.png')
plt.close(fig)
fig, ax = plt.subplots()
habitica_df = df[df.name == "POST_/habiticaProxy"]
habitica_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Habitica')
fig.savefig('Habitica.png')
plt.close(fig)
fig, ax = plt.subplots()
diary_df = df[df.name.str.contains("POST_/timeline/getTrips")]
diary_df.plot(x="datetime", y="reading", ax=ax, style='+', legend=None)
plt.title('Diary')
fig.savefig('Diary.png')
plt.close(fig)
return
def get_aggregate_analytics():
df = pd.DataFrame()
for user in edb.get_uuid_db().find():
user_df = esta.TimeSeries.get_time_series(user['uuid']).get_data_df("stats/server_api_time", time_query=None)
if not user_df.empty:
df = df.append(user_df, ignore_index = True)
df['datetime'] = df.ts.apply(lambda ts: dt.datetime.fromtimestamp(ts))
    df.loc[df.reading > 1, 'reading'] = 1
fig, ax = plt.subplots()
ax.xaxis.set_major_locator(mdates.WeekdayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%m/%d/%Y"))
plt.ylabel('Response time')
plt.title('App Analytics')
f_df = df[df.name == "POST_/result/metrics/timestamp"]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='g', label='Dashboard')
f_df = df[df.name == "POST_/usercache/put"]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='b', label='Usercache_put')
f_df = df[df.name == "POST_/usercache/get"]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='r', label='Usercache_get')
f_df = df[df.name == "POST_/stats/set"]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='black', label='Stats_set')
f_df = df[df.name == "POST_/habiticaRegister"]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='orange', label='Habitica Sign up_Login')
f_df = df[df.name == "POST_/habiticaProxy"]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='aqua', label='Habitica')
f_df = df[df.name.str.contains("POST_/timeline/getTrips")]
f_df.plot(x="datetime", y="reading", ax=ax, style='+', color='m', label='Diary')
plt.legend()
fig.savefig('app_analytics.png')
fig.savefig('app_analytics.eps', format='eps', dpi=1000)
return
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
get_app_analytics()
get_aggregate_analytics()
| bsd-3-clause |
wesm/ibis | ibis/sql/sqlite/tests/test_functions.py | 1 | 14619 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import pytest # noqa
from .common import SQLiteTests
from ibis.compat import unittest
from ibis import literal as L
import ibis.expr.types as ir
import ibis
import pandas.util.testing as tm
import sqlalchemy as sa
class TestSQLiteFunctions(SQLiteTests, unittest.TestCase):
def test_cast(self):
at = self._to_sqla(self.alltypes)
d = self.alltypes.double_col
s = self.alltypes.string_col
sa_d = at.c.double_col
sa_s = at.c.string_col
cases = [
(d.cast('int8'), sa.cast(sa_d, sa.types.SMALLINT)),
(s.cast('double'), sa.cast(sa_s, sa.types.REAL)),
(s.cast('float'), sa.cast(sa_s, sa.types.REAL))
]
self._check_expr_cases(cases)
@pytest.mark.xfail(raises=AssertionError, reason='NYI')
def test_decimal_cast(self):
assert False
def test_timestamp_cast_noop(self):
# See GH #592
at = self._to_sqla(self.alltypes)
tc = self.alltypes.timestamp_col
ic = self.alltypes.int_col
tc_casted = tc.cast('timestamp')
ic_casted = ic.cast('timestamp')
# Logically, it's a timestamp
assert isinstance(tc_casted, ir.TimestampColumn)
assert isinstance(ic_casted, ir.TimestampColumn)
# But it's a no-op when translated to SQLAlchemy
cases = [
(tc_casted, at.c.timestamp_col),
(ic_casted, at.c.int_col)
]
self._check_expr_cases(cases)
def test_timestamp_functions(self):
from datetime import datetime
v = L('2015-09-01 14:48:05.359').cast('timestamp')
cases = [
(v.strftime('%Y%m%d'), '20150901'),
(v.year(), 2015),
(v.month(), 9),
(v.day(), 1),
(v.hour(), 14),
(v.minute(), 48),
(v.second(), 5),
(v.millisecond(), 359),
# there could be pathological failure at midnight somewhere, but
# that's okay
(ibis.now().strftime('%Y%m%d %H'),
datetime.utcnow().strftime('%Y%m%d %H'))
]
self._check_e2e_cases(cases)
def test_binary_arithmetic(self):
cases = [
(L(3) + L(4), 7),
(L(3) - L(4), -1),
(L(3) * L(4), 12),
(L(12) / L(4), 3),
# (L(12) ** L(2), 144),
(L(12) % L(5), 2)
]
self._check_e2e_cases(cases)
def test_div_floordiv(self):
cases = [
(L(7) / L(2), 3.5),
(L(7) // L(2), 3),
(L(7).floordiv(2), 3),
(L(2).rfloordiv(7), 3),
]
self._check_e2e_cases(cases)
def test_typeof(self):
cases = [
(L('foo_bar').typeof(), 'text'),
(L(5).typeof(), 'integer'),
(ibis.NA.typeof(), 'null'),
(L(1.2345).typeof(), 'real'),
]
self._check_e2e_cases(cases)
def test_nullifzero(self):
cases = [
(L(0).nullifzero(), None),
(L(5.5).nullifzero(), 5.5),
]
self._check_e2e_cases(cases)
def test_string_length(self):
cases = [
(L('foo_bar').length(), 7),
(L('').length(), 0),
]
self._check_e2e_cases(cases)
def test_string_substring(self):
cases = [
(L('foo_bar').left(3), 'foo'),
(L('foo_bar').right(3), 'bar'),
(L('foo_bar').substr(0, 3), 'foo'),
(L('foo_bar').substr(4, 3), 'bar'),
(L('foo_bar').substr(1), 'oo_bar'),
]
self._check_e2e_cases(cases)
def test_string_strip(self):
cases = [
(L(' foo ').lstrip(), 'foo '),
(L(' foo ').rstrip(), ' foo'),
(L(' foo ').strip(), 'foo'),
]
self._check_e2e_cases(cases)
def test_string_upper_lower(self):
cases = [
(L('foo').upper(), 'FOO'),
(L('FOO').lower(), 'foo'),
]
self._check_e2e_cases(cases)
def test_string_contains(self):
cases = [
(L('foobar').contains('bar'), True),
(L('foobar').contains('foo'), True),
(L('foobar').contains('baz'), False),
]
self._check_e2e_cases(cases)
def test_string_find(self):
cases = [
(L('foobar').find('bar'), 3),
(L('foobar').find('baz'), -1),
]
self._check_e2e_cases(cases)
def test_string_like(self):
cases = [
(L('foobar').like('%bar'), True),
(L('foobar').like('foo%'), True),
(L('foobar').like('%baz%'), False),
]
self._check_e2e_cases(cases)
def test_str_replace(self):
cases = [
(L('foobarfoo').replace('foo', 'H'), 'HbarH'),
]
self._check_e2e_cases(cases)
def test_math_functions(self):
cases = [
(L(-5).abs(), 5),
(L(5).abs(), 5),
(ibis.least(L(5), L(10), L(1)), 1),
(ibis.greatest(L(5), L(10), L(1)), 10),
(L(5.5).round(), 6.0),
(L(5.556).round(2), 5.56),
]
self._check_e2e_cases(cases)
def test_regexp(self):
pytest.skip('NYI: Requires adding regex udf with sqlite3')
v = L('abcd')
v2 = L('1222')
cases = [
(v.re_search('[a-z]'), True),
(v.re_search('[\d]+'), False),
(v2.re_search('[\d]+'), True),
]
self._check_e2e_cases(cases)
def test_fillna_nullif(self):
cases = [
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
]
self._check_e2e_cases(cases)
@pytest.mark.xfail(raises=AssertionError, reason='NYI')
def test_coalesce(self):
assert False
def test_numeric_builtins_work(self):
t = self.alltypes
d = t.double_col
exprs = [
d.fillna(0),
]
self._execute_projection(t, exprs)
def test_misc_builtins_work(self):
t = self.alltypes
d = t.double_col
exprs = [
(d > 20).ifelse(10, -20),
(d > 20).ifelse(10, -20).abs(),
# tier and histogram
d.bucket([0, 10, 25, 50, 100]),
d.bucket([0, 10, 25, 50], include_over=True),
d.bucket([0, 10, 25, 50], include_over=True, close_extreme=False),
d.bucket([10, 25, 50, 100], include_under=True),
]
self._execute_projection(t, exprs)
def test_category_label(self):
t = self.alltypes
d = t.double_col
bucket = d.bucket([0, 10, 25, 50, 100])
exprs = [
bucket.label(['a', 'b', 'c', 'd'])
]
self._execute_projection(t, exprs)
def test_union(self):
pytest.skip('union not working yet')
t = self.alltypes
expr = (t.group_by('string_col')
.aggregate(t.double_col.sum().name('foo'))
.sort_by('string_col'))
t1 = expr.limit(4)
t2 = expr.limit(4, offset=4)
t3 = expr.limit(8)
result = t1.union(t2).execute()
expected = t3.execute()
assert (result.string_col == expected.string_col).all()
def test_aggregations_execute(self):
table = self.alltypes.limit(100)
d = table.double_col
s = table.string_col
cond = table.string_col.isin(['1', '7'])
exprs = [
table.bool_col.count(),
table.bool_col.any(),
table.bool_col.all(),
table.bool_col.notany(),
table.bool_col.notall(),
d.sum(),
d.mean(),
d.min(),
d.max(),
table.bool_col.count(where=cond),
d.sum(where=cond),
d.mean(where=cond),
d.min(where=cond),
d.max(where=cond),
s.group_concat(),
]
self._execute_aggregation(table, exprs)
def test_distinct_aggregates(self):
table = self.alltypes.limit(100)
exprs = [
table.double_col.nunique()
]
self._execute_aggregation(table, exprs)
def test_not_exists_works(self):
t = self.alltypes
t2 = t.view()
expr = t[-(t.string_col == t2.string_col).any()]
expr.execute()
def test_interactive_repr_shows_error(self):
# #591. Doing this in SQLite because so many built-in functions are not
# available
import ibis.config as config
expr = self.alltypes.double_col.approx_nunique()
with config.option_context('interactive', True):
result = repr(expr)
assert 'no translator rule' in result.lower()
def test_subquery_invokes_sqlite_compiler(self):
t = self.alltypes
expr = (t.mutate(d=t.double_col.fillna(0))
.limit(1000)
.group_by('string_col')
.size())
expr.execute()
def _execute_aggregation(self, table, exprs):
agg_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
agged_table = table.aggregate(agg_exprs)
agged_table.execute()
def _execute_projection(self, table, exprs):
agg_exprs = [expr.name('e%d' % i)
for i, expr in enumerate(exprs)]
proj = table.projection(agg_exprs)
proj.execute()
def test_filter_has_sqla_table(self):
t = self.alltypes
pred = t.year == 2010
filt = t.filter(pred).sort_by('float_col').float_col
s = filt.execute()
result = s.squeeze().reset_index(drop=True)
expected = t.execute().query(
'year == 2010'
).sort('float_col').float_col
assert len(result) == len(expected)
def test_column_access_after_sort(self):
t = self.alltypes
expr = t.sort_by('float_col').string_col
# it works!
expr.execute(limit=10)
def test_materialized_join(self):
path = '__ibis_tmp_{0}.db'.format(ibis.util.guid())
con = ibis.sqlite.connect(path, create=True)
try:
con.raw_sql("create table mj1 (id1 integer, val1 real)")
con.raw_sql("insert into mj1 values (1, 10), (2, 20)")
con.raw_sql("create table mj2 (id2 integer, val2 real)")
con.raw_sql("insert into mj2 values (1, 15), (2, 25)")
t1 = con.table('mj1')
t2 = con.table('mj2')
joined = t1.join(t2, t1.id1 == t2.id2).materialize()
result = joined.val2.execute()
assert len(result) == 2
finally:
os.remove(path)
def test_anonymous_aggregate(self):
t = self.alltypes
expr = t[t.double_col > t.double_col.mean()]
result = expr.execute()
df = t.execute()
expected = df[df.double_col > df.double_col.mean()].reset_index(
drop=True
)
tm.assert_frame_equal(result, expected)
def test_head(self):
t = self.alltypes
result = t.head().execute()
expected = t.limit(5).execute()
tm.assert_frame_equal(result, expected)
def test_identical_to(self):
t = self.alltypes
dt = t[['tinyint_col', 'double_col']].execute()
expr = t.tinyint_col.identical_to(t.double_col)
result = expr.execute()
expected = (dt.tinyint_col.isnull() & dt.double_col.isnull()) | (
dt.tinyint_col == dt.double_col
)
expected.name = result.name
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(raises=AttributeError, reason='NYI')
def test_truncate(self):
expr = self.alltypes.limit(5)
name = str(uuid.uuid4())
self.con.create_table(name, expr)
t = self.con.table(name)
assert len(t.execute()) == 5
t.truncate()
assert len(t.execute()) == 0
t.drop()
@pytest.mark.xfail(
raises=AssertionError,
reason='SQLite returns bools as integers, Ibis should recast them'
)
def test_not(self):
t = self.alltypes.limit(10)
expr = t.projection([(~t.double_col.isnull()).name('double_col')])
result = expr.execute().double_col
expected = ~t.execute().double_col.isnull()
tm.assert_series_equal(result, expected)
@pytest.mark.sqlite
def test_compile_with_named_table():
t = ibis.table([('a', 'string')], name='t')
result = ibis.sqlite.compile(t.a)
st = sa.table('t', sa.column('a', sa.String)).alias('t0')
assert str(result) == str(sa.select([st.c.a]))
@pytest.mark.sqlite
def test_compile_with_unnamed_table():
t = ibis.table([('a', 'string')])
result = ibis.sqlite.compile(t.a)
st = sa.table('t0', sa.column('a', sa.String)).alias('t0')
assert str(result) == str(sa.select([st.c.a]))
@pytest.mark.sqlite
def test_compile_with_multiple_unnamed_tables():
t = ibis.table([('a', 'string')])
s = ibis.table([('b', 'string')])
join = t.join(s, t.a == s.b)
result = ibis.sqlite.compile(join)
sqla_t = sa.table('t0', sa.column('a', sa.String)).alias('t0')
sqla_s = sa.table('t1', sa.column('b', sa.String)).alias('t1')
sqla_join = sqla_t.join(sqla_s, sqla_t.c.a == sqla_s.c.b)
expected = sa.select([sqla_t.c.a, sqla_s.c.b]).select_from(sqla_join)
assert str(result) == str(expected)
@pytest.mark.sqlite
def test_compile_with_one_unnamed_table():
t = ibis.table([('a', 'string')])
s = ibis.table([('b', 'string')], name='s')
join = t.join(s, t.a == s.b)
result = ibis.sqlite.compile(join)
sqla_t = sa.table('t0', sa.column('a', sa.String)).alias('t0')
sqla_s = sa.table('s', sa.column('b', sa.String)).alias('t1')
sqla_join = sqla_t.join(sqla_s, sqla_t.c.a == sqla_s.c.b)
expected = sa.select([sqla_t.c.a, sqla_s.c.b]).select_from(sqla_join)
assert str(result) == str(expected)
| apache-2.0 |
choldgraf/download | setup.py | 1 | 1959 | #! /usr/bin/env python
#
# Copyright (C) 2015 Chris Holdgraf
# <choldgraf@gmail.com>
#
# Adapted from MNE-Python
import os
from setuptools import setup
descr = """A quick module to help downloading files using python."""
with open("./download/__init__.py", "r") as ff:
lines = ff.readlines()
for line in lines:
if line.startswith("__version__"):
__version__ = line.split("= ")[-1].strip().strip('"')
break
DISTNAME = "download"
DESCRIPTION = descr
MAINTAINER = "Chris Holdgraf"
MAINTAINER_EMAIL = "choldgraf@gmail.com"
URL = "https://github.com/choldgraf/download"
LICENSE = "BSD (3-clause)"
DOWNLOAD_URL = "https://github.com/choldgraf/download"
with open("./README.rst", "r") as ff:
LONG_DESCRIPTION = ff.read()
if __name__ == "__main__":
if os.path.exists("MANIFEST"):
os.remove("MANIFEST")
setup(
name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=False,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/x-rst",
license=LICENSE,
url=URL,
version=__version__,
download_url=DOWNLOAD_URL,
zip_safe=False, # the package can run out of an .egg file
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
],
platforms="any",
packages=["download"],
package_data={},
scripts=[],
install_requires=["tqdm", "six", "requests"],
extras_require={
"dev": ["numpy", "codecov", "pytest", "pytest-cov"],
"sphinx": ["matplotlib", "pandas", "sphinx", "sphinx-gallery", "pillow"],
},
)
| mit |
ceholden/yatsm | yatsm/utils.py | 1 | 7712 | from __future__ import division
from datetime import datetime as dt
import fnmatch
import os
import re
import sys
import numpy as np
import pandas as pd
import six
try:
from scandir import walk
except:
from os import walk
from .log_yatsm import logger
# JOB SPECIFIC FUNCTIONS
def distribute_jobs(job_number, total_jobs, n, interlaced=True):
""" Assign `job_number` out of `total_jobs` a subset of `n` tasks
Args:
job_number (int): 0-indexed processor to distribute jobs to
total_jobs (int): total number of processors running jobs
n (int): number of tasks (e.g., lines in image, regions in segment)
interlaced (bool, optional): interlace job assignment (default: True)
Returns:
np.ndarray: np.ndarray of task IDs to be processed
Raises:
ValueError: raise error if `job_number` and `total_jobs` specified
result in no jobs being assigned (happens if `job_number` and
`total_jobs` are both 1)
"""
if interlaced:
assigned = 0
tasks = []
while job_number + total_jobs * assigned < n:
tasks.append(job_number + total_jobs * assigned)
assigned += 1
tasks = np.asarray(tasks)
else:
size = int(n / total_jobs)
i_start = size * job_number
i_end = size * (job_number + 1)
tasks = np.arange(i_start, min(i_end, n))
if tasks.size == 0:
raise ValueError(
'No jobs assigned for job_number/total_jobs: {j}/{t}'.format(
j=job_number,
t=total_jobs))
return tasks
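# A minimal usage sketch (the worker/task counts below are made-up): distributing
# n=10 tasks across total_jobs=3 workers, worker job_number=1 receives every
# third task when interlaced, or one contiguous block otherwise.
#
#   >>> distribute_jobs(1, 3, 10, interlaced=True)
#   array([1, 4, 7])
#   >>> distribute_jobs(1, 3, 10, interlaced=False)
#   array([3, 4, 5])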
def get_output_name(dataset_config, line):
""" Returns output name for specified config and line number
Args:
dataset_config (dict): configuration information about the dataset
line (int): line of the dataset for output
Returns:
filename (str): output filename
"""
return os.path.join(dataset_config['output'],
'%s%s.npz' % (dataset_config['output_prefix'], line))
# IMAGE DATASET READING
def csvfile_to_dataframe(input_file, date_format='%Y%j'):
""" Return sorted filenames of images from input text file
Args:
input_file (str): text file of dates and files
date_format (str): format of dates in file
Returns:
pd.DataFrame: pd.DataFrame of dates, sensor IDs, and filenames
"""
df = pd.read_csv(input_file)
# Guess and convert date field
date_col = [i for i, n in enumerate(df.columns) if 'date' in n.lower()]
if not date_col:
raise KeyError('Could not find date column in input file')
if len(date_col) > 1:
logger.warning('Multiple date columns found in input CSV file. '
'Using %s' % df.columns[date_col[0]])
date_col = df.columns[date_col[0]]
df[date_col] = pd.to_datetime(
df[date_col], format=date_format).map(lambda x: dt.toordinal(x))
return df
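# A minimal input sketch (column names and path are assumptions, not prescribed):
# with date_format='%Y%j' a CSV such as
#
#   date,sensor,filename
#   2000034,LE7,/data/LE70220049/stack.tif
#
# parses '2000034' as year 2000, day-of-year 34, and the date column is then
# converted to ordinal day numbers.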
def get_image_IDs(filenames):
""" Returns image IDs for each filename (basename of dirname of file)
Args:
filenames (iterable): filenames to return image IDs for
Returns:
list: image IDs for each file in `filenames`
"""
return [os.path.basename(os.path.dirname(f)) for f in filenames]
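# A minimal sketch (paths are hypothetical): the image ID is the basename of
# each file's parent directory, so
#
#   >>> get_image_IDs(['/data/LE70220049/stack.tif', '/data/LE70220050/stack.tif'])
#   ['LE70220049', 'LE70220050']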
# MAPPING UTILITIES
def write_output(raster, output, image_ds, gdal_frmt, ndv, band_names=None):
""" Write raster to output file """
from osgeo import gdal, gdal_array
logger.debug('Writing output to disk')
driver = gdal.GetDriverByName(str(gdal_frmt))
if len(raster.shape) > 2:
nband = raster.shape[2]
else:
nband = 1
ds = driver.Create(
output,
image_ds.RasterXSize, image_ds.RasterYSize, nband,
gdal_array.NumericTypeCodeToGDALTypeCode(raster.dtype.type)
)
if band_names is not None:
if len(band_names) != nband:
logger.error('Did not get enough names for all bands')
sys.exit(1)
if raster.ndim > 2:
for b in range(nband):
logger.debug(' writing band {b}'.format(b=b + 1))
ds.GetRasterBand(b + 1).WriteArray(raster[:, :, b])
ds.GetRasterBand(b + 1).SetNoDataValue(ndv)
if band_names is not None:
ds.GetRasterBand(b + 1).SetDescription(band_names[b])
ds.GetRasterBand(b + 1).SetMetadata({
'band_{i}'.format(i=b + 1): band_names[b]
})
else:
logger.debug(' writing band')
ds.GetRasterBand(1).WriteArray(raster)
ds.GetRasterBand(1).SetNoDataValue(ndv)
if band_names is not None:
ds.GetRasterBand(1).SetDescription(band_names[0])
ds.GetRasterBand(1).SetMetadata({'band_1': band_names[0]})
ds.SetProjection(image_ds.GetProjection())
ds.SetGeoTransform(image_ds.GetGeoTransform())
ds = None
# RESULT UTILITIES
def find_results(location, pattern):
""" Create list of result files and return sorted
Args:
location (str): directory location to search
pattern (str): glob style search pattern for results
Returns:
results (list): list of file paths for results found
"""
# Note: already checked for location existence in main()
records = []
for root, dirnames, filenames in walk(location):
for filename in fnmatch.filter(filenames, pattern):
records.append(os.path.join(root, filename))
if len(records) == 0:
raise IOError('Could not find results in: %s' % location)
records.sort()
return records
def iter_records(records, warn_on_empty=False, yield_filename=False):
""" Iterates over records, returning result NumPy array
Args:
records (list): List containing filenames of results
warn_on_empty (bool, optional): Log warning if result contained no
result records (default: False)
yield_filename (bool, optional): Yield the filename and the record
Yields:
np.ndarray or tuple: Result saved in record and the filename, if desired
"""
n_records = len(records)
for _i, r in enumerate(records):
# Verbose progress
if np.mod(_i, 100) == 0:
logger.debug('{0:.1f}%'.format(_i / n_records * 100))
# Open output
try:
rec = np.load(r)['record']
except (ValueError, AssertionError, IOError) as e:
logger.warning('Error reading a result file (may be corrupted) '
'({}): {}'.format(r, str(e)))
continue
if rec.shape[0] == 0:
# No values in this file
if warn_on_empty:
logger.warning('Could not find results in {f}'.format(f=r))
continue
if yield_filename:
yield rec, r
else:
yield rec
# MISC UTILITIES
def date2index(dates, d):
""" Returns index of sorted array `dates` containing the date `d`
Args:
dates (np.ndarray): array of dates (or numbers really) in sorted order
d (int, float): number to search for
Returns:
int: index of `dates` containing value `d`
"""
return np.searchsorted(dates, d, side='right')
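# A minimal sketch (the ordinals are made-up): with side='right' the returned
# index points one past the matching value, e.g.
#
#   >>> date2index(np.array([10, 20, 30, 40]), 30)
#   3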
def is_integer(s):
""" Returns True if `s` is an integer """
try:
int(s)
return True
except:
return False
def copy_dict_filter_key(d, regex):
""" Copy a dict recursively, but only if key doesn't match regex pattern
"""
out = {}
for k, v in six.iteritems(d):
if not re.match(regex, k):
if isinstance(v, dict):
out[k] = copy_dict_filter_key(v, regex)
else:
out[k] = v
return out
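# A minimal sketch (keys are made-up): keys matching the regex are dropped
# recursively, e.g.
#
#   >>> copy_dict_filter_key({'a': 1, '_cache': 2, 'b': {'_tmp': 3, 'c': 4}}, '_')
#   {'a': 1, 'b': {'c': 4}}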
| mit |
biokit/biokit | biokit/viz/corrplot.py | 1 | 18115 | """.. rubric:: Corrplot utilities
:author: Thomas Cokelaer
:references: http://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
"""
import string
from colormap import cmap_builder
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.patches import Ellipse, Circle, Rectangle, Wedge
from matplotlib.collections import PatchCollection
import pandas as pd
import scipy.cluster.hierarchy as hierarchy
from biokit.viz.linkage import Linkage
__all__ = ['Corrplot']
class Corrplot(Linkage):
"""An implementation of correlation plotting tools (corrplot)
Here is a simple example with a correlation matrix as an input (stored in
a pandas dataframe):
.. plot::
:width: 50%
:include-source:
# create a correlation-like data set stored in a Pandas' dataframe.
import string
# letters = string.uppercase[0:10] # python2
letters = string.ascii_uppercase[0:10]
import pandas as pd
df = pd.DataFrame(dict(( (k, np.random.random(10)+ord(k)-65) for k in letters)))
# and use corrplot
from biokit.viz import corrplot
c = corrplot.Corrplot(df)
c.plot()
.. seealso:: All functionalities are covered in this
`notebook <http://nbviewer.ipython.org/github/biokit/biokit/blob/master/notebooks/viz/corrplot.ipynb>`_
"""
def __init__(self, data, na=0):
""".. rubric:: Constructor
Plots the content of square matrix that contains correlation values.
:param data: input can be a dataframe (Pandas), or list of lists (python) or
a numpy matrix. Note, however, that values must be between -1 and 1. If not,
or if the matrix (or list of lists) is not squared, then correlation is
computed. The data or computed correlation is stored in :attr:`df` attribute.
:param bool compute_correlation: if the matrix is non-squared or values are not
bounded in -1,+1, correlation is computed. If you do not want that behaviour,
set this parameter to False. (True by default).
:param na: replace NA values with this value (default 0)
The :attr:`params` contains some tunable parameters for the colorbar in the
:meth:`plot` method.
::
# can be a list of lists, the correlation matrix is then a 2x2 matrix
c = corrplot.Corrplot([[1,1], [2,4], [3,3], [4,4]])
"""
super(Corrplot, self).__init__()
#: The input data is stored in a dataframe and must therefore be
#: compatible (list of lists, dictionary, matrices...)
self.df = pd.DataFrame(data, copy=True)
compute_correlation = False
w, h = self.df.shape
if self.df.max().max() > 1 or self.df.min().min()<-1:
compute_correlation = True
if w !=h:
compute_correlation = True
if list(self.df.index) != list(self.df.columns):
compute_correlation = True
if compute_correlation:
print("Computing correlation")
cor = self.df.corr()
self.df = cor
# replace NA with zero
self.df.fillna(na, inplace=True)
#: tunable parameters for the :meth:`plot` method.
self.params = {
'colorbar.N': 100,
'colorbar.shrink': .8,
'colorbar.orientation':'vertical'}
def _set_default_cmap(self):
self.cm = cmap_builder('#AA0000','white','darkblue')
def order(self, method='complete', metric='euclidean',inplace=False):
"""Rearrange the order of rows and columns after clustering
:param method: any scipy method (e.g., single, average, centroid,
median, ward). See scipy.cluster.hierarchy.linkage
:param metric: any scipy distance (euclidean, hamming, jaccard)
See scipy.spatial.distance or scipy.cluster.hierarchy
:param bool inplace: if set to True, the dataframe is replaced
You probably do not need to use that method. Use :meth:`plot` and
the two parameters order_metric and order_method instead.
"""
Y = self.linkage(self.df, method=method, metric=metric)
ind1 = hierarchy.fcluster(Y, 0.7*max(Y[:,2]), 'distance')
Z = hierarchy.dendrogram(Y, no_plot=True)
idx1 = Z['leaves']
cor2 = self.df.iloc[idx1,idx1]
if inplace is True:
self.df = cor2
else:
return cor2
self.Y = Y
self.Z = Z
self.idx1 = idx1
self.ind1 = ind1
#treee$order == Z.leaves and c.idx1
# hc = c.ind1
#clustab <- table(hc)[unique(hc[tree$order])]
#cu <- c(0, cumsum(clustab))
#mat <- cbind(cu[-(k + 1)] + 0.5, n - cu[-(k + 1)] + 0.5,
#cu[-1] + 0.5, n - cu[-1] + 0.5)
#rect(mat[,1], mat[,2], mat[,3], mat[,4], border = col, lwd = lwd)
def plot(self, fig=None, grid=True,
rotation=30, lower=None, upper=None,
shrink=0.9, facecolor='white', colorbar=True, label_color='black',
fontsize='small', edgecolor='black', method='ellipse',
order_method='complete', order_metric='euclidean', cmap=None,
ax=None, binarise_color=False):
"""plot the correlation matrix from the content of :attr:`df`
(dataframe)
By default, the correlation is shown on the upper and lower triangle and is
symmetric wrt to the diagonal. The symbols are ellipses. The symbols can
be changed to e.g. rectangle. The symbols are shown on upper and lower sides but
you could choose a symbol for the upper side and another for the lower side using
the **lower** and **upper** parameters.
:param fig: Create a new figure by default. If an instance of an existing
figure is provided, the corrplot is overlayed on the figure provided.
Can also be the number of the figure.
:param grid: add grid (Defaults to grey color). You can set it to False or a color.
:param rotation: rotate labels on y-axis
:param lower: if set to a valid method, plots the data on the lower
left triangle
:param upper: if set to a valid method, plots the data on the upper
left triangle
:param float shrink: maximum space used (in percent) by a symbol.
If negative values are provided, the absolute value is taken.
If greater than 1, the symbols will overlap.
:param facecolor: color of the background (defaults to white).
:param colorbar: add the colorbar (defaults to True).
:param str label_color: (defaults to black).
:param fontsize: size of the fonts defaults to 'small'.
:param method: shape to be used in 'ellipse', 'square', 'rectangle',
'color', 'text', 'circle', 'number', 'pie'.
:param order_method: see :meth:`order`.
:param order_metric: see :meth:`order`.
:param cmap: a valid cmap from matplotlib or colormap package (e.g.,
'jet', or 'copper'). Default is red/white/blue colors.
:param ax: a matplotlib axes.
The colorbar can be tuned with the parameters stored in :attr:`params`.
Here is an example. See notebook for other examples::
c = corrplot.Corrplot(dataframe)
c.plot(cmap=('Orange', 'white', 'green'))
c.plot(method='circle')
c.plot(colorbar=False, shrink=.8, upper='circle' )
"""
# default
if cmap != None:
try:
if isinstance(cmap, str):
self.cm = cmap_builder(cmap)
else:
self.cm = cmap_builder(*cmap)
except:
print("incorrect cmap. Use default one")
self._set_default_cmap()
else:
self._set_default_cmap()
self.shrink = abs(shrink)
self.fontsize = fontsize
self.edgecolor = edgecolor
df = self.order(method=order_method, metric=order_metric)
# figure can be a number or an instance; otherwise creates it
if isinstance(fig, int):
fig = plt.figure(num=fig, facecolor=facecolor)
elif fig is not None:
fig = plt.figure(num=fig.number, facecolor=facecolor)
else:
fig = plt.figure(num=None, facecolor=facecolor)
# do we have an axes to plot the data in ?
if ax is None:
ax = plt.subplot(1, 1, 1, aspect='equal', facecolor=facecolor)
else:
# if so, clear the axes. Colorbar cannot be removed easily.
plt.sca(ax)
ax.clear()
# subplot resets the bg color, let us set it again
fig.set_facecolor(facecolor)
width, height = df.shape
labels = (df.columns)
# add all patches to the figure
# TODO check value of lower and upper
if upper is None and lower is None:
mode = 'method'
diagonal = True
elif upper and lower:
mode = 'both'
diagonal = False
elif lower is not None:
mode = 'lower'
diagonal = True
elif upper is not None:
mode = 'upper'
diagonal = True
self.binarise_color = binarise_color
if mode == 'upper':
self._add_patches(df, upper, 'upper', ax, diagonal=True)
elif mode == 'lower':
self._add_patches(df, lower, 'lower', ax, diagonal=True)
elif mode == 'method':
self._add_patches(df, method, 'both', ax, diagonal=True)
elif mode == 'both':
self._add_patches(df, upper, 'upper', ax, diagonal=False)
self._add_patches(df, lower, 'lower', ax, diagonal=False)
# shift the limits to englobe the patches correctly
ax.set_xlim(-0.5, width-.5)
ax.set_ylim(-0.5, height-.5)
# set xticks/xlabels on top
ax.xaxis.tick_top()
xtickslocs = np.arange(len(labels))
ax.set_xticks(xtickslocs)
ax.set_xticklabels(labels, rotation=rotation, color=label_color,
fontsize=fontsize, ha='left')
ax.invert_yaxis()
ytickslocs = np.arange(len(labels))
ax.set_yticks(ytickslocs)
ax.set_yticklabels(labels, fontsize=fontsize, color=label_color)
plt.tight_layout()
if grid is not False:
if grid is True:
grid = 'grey'
for i in range(0, width):
ratio1 = float(i)/width
ratio2 = float(i+2)/width
# TODO 1- set axis off
# 2 - set xlabels along the diagonal
# set colorbar either on left or bottom
if mode == 'lower':
plt.axvline(i+.5, ymin=1-ratio1, ymax=0., color=grid)
plt.axhline(i+.5, xmin=0, xmax=ratio2, color=grid)
if mode == 'upper':
plt.axvline(i+.5, ymin=1 - ratio2, ymax=1, color=grid)
plt.axhline(i+.5, xmin=ratio1, xmax=1, color=grid)
if mode in ['method', 'both']:
plt.axvline(i+.5, color=grid)
plt.axhline(i+.5, color=grid)
# can probably be simplified
if mode == 'lower':
plt.axvline(-.5, ymin=0, ymax=1, color='grey')
plt.axvline(width-.5, ymin=0, ymax=1./width, color='grey', lw=2)
plt.axhline(width-.5, xmin=0, xmax=1, color='grey',lw=2)
plt.axhline(-.5, xmin=0, xmax=1./width, color='grey',lw=2)
plt.xticks([])
for i in range(0, width):
plt.text(i, i-.6 ,labels[i],fontsize=fontsize,
color=label_color,
rotation=rotation, verticalalignment='bottom')
plt.text(-.6, i ,labels[i],fontsize=fontsize,
color=label_color,
rotation=0, horizontalalignment='right')
plt.axis('off')
# can probably be simplified
elif mode == 'upper':
plt.axvline(width-.5, ymin=0, ymax=1, color='grey', lw=2)
plt.axvline(-.5, ymin=1-1./width, ymax=1, color='grey', lw=2)
plt.axhline(-.5, xmin=0, xmax=1, color='grey',lw=2)
plt.axhline(width-.5, xmin=1-1./width, xmax=1, color='grey',lw=2)
plt.yticks([])
for i in range(0, width):
plt.text(-.6+i, i ,labels[i],fontsize=fontsize,
color=label_color, horizontalalignment='right',
rotation=0)
plt.text(i, -.5 ,labels[i],fontsize=fontsize,
color=label_color, rotation=rotation, verticalalignment='bottom')
plt.axis('off')
# set all ticks length to zero
ax = plt.gca()
ax.tick_params(axis='both',which='both', length=0)
if colorbar:
N = self.params['colorbar.N'] + 1
assert N >=2
cb = plt.gcf().colorbar(self.collection,
orientation=self.params['colorbar.orientation'],
shrink=self.params['colorbar.shrink'],
boundaries= np.linspace(0,1,N), ticks=[0,.25, 0.5, 0.75,1])
cb.ax.set_yticklabels([-1,-.5,0,.5,1])
cb.set_clim(0,1) # make sure it goes from -1 to 1 even though actual values may not reach that range
def _add_patches(self, df, method, fill, ax, diagonal=True):
width, height = df.shape
labels = (df.columns)
patches = []
colors = []
for x in range(width):
for y in range(height):
if fill == 'lower' and x > y:
continue
elif fill == 'upper' and x < y:
continue
if diagonal is False and x==y:
continue
datum = (df.iloc[x, y] +1.)/2.
d = df.iloc[x, y]
d_abs = np.abs(d)
#c = self.pvalues[x, y]
rotate = -45 if d > 0 else +45
#cmap = self.poscm if d >= 0 else self.negcm
if method in ['ellipse', 'square', 'rectangle', 'color']:
if method == 'ellipse':
func = Ellipse
patch = func((x, y), width=1 * self.shrink,
height=(self.shrink - d_abs*self.shrink), angle=rotate)
else:
func = Rectangle
w = h = d_abs * self.shrink
#FIXME shrink must be <=1
offset = (1-w)/2.
if method == 'color':
w = 1
h = 1
offset = 0
patch = func((x + offset-.5, y + offset-.5), width=w,
height=h, angle=0)
if self.edgecolor:
patch.set_edgecolor(self.edgecolor)
#patch.set_facecolor(cmap(d_abs))
colors.append(datum)
if d_abs > 0.05:
patch.set_linestyle('dotted')
#ax.add_artist(patch)
patches.append(patch)
#FIXME edgecolor is always printed
elif method=='circle':
patch = Circle((x, y), radius=d_abs*self.shrink/2.)
if self.edgecolor:
patch.set_edgecolor(self.edgecolor)
#patch.set_facecolor(cmap(d_abs))
colors.append(datum)
if d_abs > 0.05:
patch.set_linestyle('dotted')
#ax.add_artist(patch)
patches.append(patch)
elif method in ['number', 'text']:
if d<0:
edgecolor = self.cm(-1.0)
elif d>=0:
edgecolor = self.cm(1.0)
d_str = "{:.2f}".format(d).replace("0.", ".").replace(".00", "")
ax.text(x,y, d_str, color=edgecolor,
fontsize=self.fontsize, horizontalalignment='center',
weight='bold', alpha=max(0.5, d_abs),
withdash=False)
elif method == 'pie':
S = 360 * d_abs
patch = [
Wedge((x,y), 1*self.shrink/2., -90, S-90),
Wedge((x,y), 1*self.shrink/2., S-90, 360-90),
]
#patch[0].set_facecolor(cmap(d_abs))
#patch[1].set_facecolor('white')
colors.append(datum)
colors.append(0.5)
if self.edgecolor:
patch[0].set_edgecolor(self.edgecolor)
patch[1].set_edgecolor(self.edgecolor)
#ax.add_artist(patch[0])
#ax.add_artist(patch[1])
patches.append(patch[0])
patches.append(patch[1])
else:
raise ValueError('Method for the symbols is not known. Use e.g., square, circle')
if self.binarise_color:
colors = [1 if color >0.5 else -1 for color in colors]
if len(patches):
col1 = PatchCollection(patches, array=np.array(colors), cmap=self.cm)
ax.add_collection(col1)
self.collection = col1
# Somehow a release of matplotlib prevent the edge color
# from working but the set_edgecolor on the collection itself does
# work...
if self.edgecolor:
self.collection.set_edgecolor(self.edgecolor)
| bsd-2-clause |
sniemi/SamPy | sandbox/webserver/cgi-bin/pages.py | 1 | 15221 | '''HTML Pages'''
import os, sys, glob
import time, datetime
import matplotlib
from matplotlib import dates as MD
from matplotlib import ticker as MT
matplotlib.use('Agg')
import numpy as N
import pylab as P
from math import *
def page1(output, camera, year, response):
# Introduction
print "<html>"
print "<head><center><b>HST Focus Model</b></center></head>"
print "<body style='background-color:Moccasin;''>"
print "<p> Calculations of the HST focus model and measurements of the HST focus are presented."
print " Model results can be obtained for any time range at 5 minute intervals beginning on March 9th 2003. </p> "
print "<p> Measured values are available for about one hour each month."
print "Each camera has its own operational time period listed below during which focus measurements were made"
print" A comparison between measurements and the model may be chosen for the measurement time periods. </p>"
# Radio buttons for display type
print "<br><b> Display </b><br>"
print "<form method='POST' action = 'control2.py'>"
if output == 'Model': print "<input type='radio' name='Output' value='Model' checked/> Model <br>"
else: print "<input type='radio' name='Output' value='Model'/> Model <br>"
if output == 'Measure': print "<input type='radio' name='Output' value='Measure' checked /> Measurement <br>"
else: print "<input type='radio' name='Output' value='Measure' /> Measurement <br>"
if output == 'Compare': print "<input type='radio' name='Output' value='Compare' checked /> Comparison <br>"
else: print "<input type='radio' name='Output' value='Compare' /> Comparison <br>"
# Radio buttons for camera
print "<br><b> Camera </b><br>"
if camera == 'HRC': print "<input type='radio' name='Camera' value='HRC' checked/> ACS / HRC January 22nd 2003 to January 21st 2007 <br>"
else: print "<input type='radio' name='Camera' value='HRC' /> ACS / HRC January 22nd 2003 to January 21st 2007 <br>"
if camera == 'PC': print "<input type='radio' name='Camera' value='PC' checked/> WFC2 / PC January 22nd 2003 to May 7th 2009 <br>"
else: print "<input type='radio' name='Camera' value='PC' /> WFC2 / PC January 22nd 2003 to May 7th 2009 <br>"
if camera == 'WFC1': print "<input type='radio' name='Camera' value='WFC1' checked /> ACS / WFC1 August 2009 to present <br>"
else: print "<input type='radio' name='Camera' value='WFC1' /> ACS / WFC1 August 2009 to present <br>"
if camera == 'WFC2': print "<input type='radio' name='Camera' value='WFC2' checked /> ACS / WFC2 <br>"
else: print "<input type='radio' name='Camera' value='WFC2' /> ACS / WFC2 <br>"
if camera == 'UVIS1': print "<input type='radio' name='Camera' value='UVIS1' checked /> WFC3 / UVIS1 August 2009 to present <br>"
else: print "<input type='radio' name='Camera' value='UVIS1' /> WFC3 / UVIS1 August 2009 to present <br>"
if camera == 'UVIS2': print "<input type='radio' name='Camera' value='UVIS2' checked /> WFC3 / UVIS2 <br>"
else: print "<input type='radio' name='Camera' value='UVIS2' /> WFC3 / UVIS2 <br>"
# Select year
#year = time.ctime()[-4:] # Default to current year
print "<br>"
print "Year: <input type='text' name='Year' size = '4' value = '%s'/> Years 2003 to present <br>" %year
# Submit reponses and move to next page
print "<br>"
print "<input type='submit' value='Select Time Period' />"
print "<br>"
print "</form>"
print "</body>"
def page2(output, year, camera, response):
print "<html>"
print "<head> <center> <b>Modelling Time Period</b> </center></head> "
print "<body style='background-color:Moccasin;''>"
print "<p> %s %s in %s </p>" %(output, camera, year)
print "<form action = 'control3.py' method='POST'>"
# Transmit information from first web page
print "<input type='hidden' name='Output' value= '%s' >" %output
print "<input type='hidden' name='Year' value= '%s' >" %year
print "<input type='hidden' name='Camera' value= '%s' >"%camera
if output == 'Model': #Arbitrary time period
print "<br/>"
print "Date: <input type='text' name='Date' size = '5'/> in form mm/dd <br>"
print "Start Time: <input type='text' name='Start' size = '5'/> Use 24-hour clock times <br>"
print " Stop Time: <input type='text' name='Stop' size = '5'/> in format hh:mm <br>"
else: # Select from times when focus measurements were taken
yy = int(year[-2:]) # Last two digits
direct = '/grp/hst/OTA/focus/source/FocusModel/'
if not os.path.exists(direct): direct = '/Users/cox/Documents/OTA/focus/' # Use local
measure = open(direct + camera + 'FocusHistory.txt','r')
focusData = measure.readlines()
measure.close()
dates = []
for line in focusData[1:]: # Skip first title line
bits = line.split()
dateString = bits[2]
(m,d,y) = dateString.split('/')
if int(y) == yy:
if len(dates) == 0 or dateString != dates[-1]: dates.append(dateString)
if len(dates) > 0:
print "<p><b>Select from list of available dates </b></p>"
print "<select name = 'Date'>"
for d in dates:
print "<option value = '%s' > '%s' </option>" %(d,d)
print "</select>"
print "<br/>"
else: print "<p><b>No measurements for ", camera, " in ", year, "</b></p>"
print "<br/>"
print "<input type='submit' value='Make Focus Plot' />"
print "</form>"
print "</body>"
def MeasurePlot(camera, year, date, output):
direct = '/grp/hst/OTA/focus/source/FocusModel/'
if not os.path.exists(direct):
print "Central Storage path not available"
direct = '/Users/cox/Documents/OTA/focus/' # Use local
measure = open(direct + camera + 'FocusHistory.txt','r')
focusData = measure.readlines()
measure.close()
# Prepare to collect requested focus measurements
tod = []
focus = []
timeAxis2 = []
for line in focusData: #Skip first title line
part = line.split()
if part[2] == date:
jdate = float(part[3])
tod.append(24.0*(jdate - floor(jdate)))
timeAxis2.append(jdate - 40587.0)
focus.append(float(part[4]))
#for ltemp in range(len(tod)) : print ltemp+1, tod[ltemp], focus[ltemp], "<br>"
P.plot(timeAxis2,focus,'ro-')
P.xlabel('Time of day')
P.ylabel('Focus position in microns')
ax = P.gca()
period = 24.0*(timeAxis2[-1]-timeAxis2[0])
stepSize = int(period/6)+1
hrs = MD.HourLocator(interval=stepSize)
ax.xaxis.set_major_locator(hrs)
hrFormat = MD.DateFormatter('%H:00')
ax.xaxis.set_major_formatter(hrFormat)
mins = MD.MinuteLocator(range(0,60,10))
ax.xaxis.set_minor_locator(mins)
if period < 1.0: # if period less than 1 hour label each 10 minutes
if period < 0.25: mins = MD.MinuteLocator(range(0,60,5)) # If less than 15 min,label every 5 min
if period < 0.09: mins = MD.MinuteLocator(range(60)) # Less than 5 min label every minute
ax.xaxis.set_minor_locator(mins)
minFormat = MD.DateFormatter('%H:%M')
ax.xaxis.set_minor_formatter(minFormat)
else: ax.xaxis.set_minor_formatter(MT.NullFormatter())
P.title('Measured Focus for ' + camera + ' ' + date)
P.grid(True)
if output == 'Compare' :
# Express times for input to Model plot
t1 = tod[0]
t2 = tod[-1]
h1 = int(t1)
m1 = int(60*(t1-h1))
h2 = int(t2)
m2 = int(60*(t2-h2))
start = '%02d:%02d' %(h1,m1)
stop = '%02d:%02d' %(h2,m2)
if t2-t1 < 0.17: print "Time range too short to model <br>" #if less than 10 minutes
else:
# Strip year from date
(m,d,y) = date.split('/')
date = m + '/' + d
ModelPlot(camera,year,date,output,start,stop)
P.title('Comparison of Measurement and Model for ' + camera + ' ' + date + '/' + year)
P.legend(('Measured', 'Model'), loc = 'best', shadow = True)
P.savefig('focusplot.png')
print "<p><img src='../focusplot.png'/></p>"
def ModelPlot(camera, year, date, output, startTime, stopTime):
if os.path.exists('/Volumes/grp'): thermal = "/Volumes/grp/hst/OTA/thermal/" # Mac access
elif os.path.exists('/grp'): thermal = "/grp/hst/OTA/thermal/" # Sun access
else : thermal = "./breathing/" # Temporary for use when Central Storage is not accessible
# Focus model offsets
camConst = {'PC': 261.1, 'HRC': 261.0, 'WFC1': 259.7, 'WFC2': 260.35, 'UVIS1': 259.39, 'UVIS2': 259.39}
secMove = {'2004.12.22':4.16, '2006.07.31':5.34, '2009.07.20':2.97}
# Define data lists
julian = []
temp1 = []
temp2 = []
temp3 = []
temp4 = []
temp5 = []
temp6 = []
hours = []
focusDate = time.strptime(date, '%m/%d')
timeAxis = []
timeAxis2 =[]
year = int(year)
month = focusDate[1]
day = focusDate[2]
# Get date-dependent focus adjustment
focusShift = 0.0
dateStamp = '%4d.%02d.%02d' %(year,month,day)
for k in secMove.keys():
if dateStamp > k:
focusShift = focusShift + secMove[k]
#print 'Secondary mirror move ', focusShift, " microns <br>"
dayOfYear = focusDate[7]
dayString = "%03d" % dayOfYear
yearString = str(year)
start = startTime.split(':')
stop = stopTime.split(':')
startHour = int(start[0])
startMinute = int(start[1])
stopHour = int(stop[0])
stopMinute = int(stop[1])
jday = toJulian(year,month,day)
jstart = jday +(startHour+startMinute/60.0)/24.0 - 40.0/(60.0*24.0) # 40 minute backtrack
jstop = jday + (stopHour + stopMinute/60.0)/24.0
fileName = 'thermalData' + yearString + '.dat'
if not(os.access(thermal + fileName, os.F_OK)): print fileName, " File not found <br>"
f=open(thermal + fileName, 'r')
while f:
line = f.readline()
if line == '' : break
columns = line.split()
timeStamp = columns[0]
jul = float(columns[1])
if jstart <= jul <= jstop :
julian.append(jul)
tup = fromJulian(jul)
hr = tup[3]+ (tup[4]+tup[5]/60.0)/60.0 # Extract hours
hours.append(hr)
tobj = datetime.datetime(tup[0], tup[1], tup[2], tup[3], tup[4], tup[5])
timeAxis.append(tobj)
timeAxis2.append(jul-40587.0) # Days since 1970-01-01
temp1.append(float(columns[2]))
temp2.append(float(columns[3]))
temp3.append(float(columns[4]))
temp4.append(float(columns[5]))
temp5.append(float(columns[6]))
temp6.append(float(columns[7]))
if day > dayOfYear : break
f.close()
if len(temp1) == 0: # No temperature data in time range
print 'No matching thermal data <br>'
return
jtime = N.array(julian)
aftLS = N.array(temp1)
trussAxial = N.array(temp2)
trussDiam = N.array(temp3)
aftShroud = N.array(temp4)
fwdShell = N.array(temp5)
lightShield = N.array(temp6)
#tBreath is value of light shield temp minus average of previous eight values
tBreath = lightShield.copy() # Make a real copy
l = N.size(tBreath)
if l < 1:
print 'No temperature data <br>'
return
r1 = range(8)
tBreath[r1] = 0.0 # Set first 8 points to zero
r2 = range(8,l) # Calculate 9th and onward
for r in r2:
tBreath[r] = 0.7*(lightShield[r]-sum(lightShield[r-8:r])/8.0)
focusModel = camConst[camera] + focusShift \
- 0.0052*jtime + 0.48*aftLS + 0.81*trussAxial - 0.28*aftShroud + 0.18*fwdShell + 0.55*tBreath
print "Average model %10.2f microns <br>" % (N.mean(focusModel[8:]))
# Just the Bely term
Bely = 0.55*tBreath
bShift = N.mean(focusModel)- N.mean(Bely)
Bely = Bely +bShift
# Time independent Focus model with mean zero offset
flatModel= camConst[camera] + focusShift \
+ 0.48*aftLS + 0.81*trussAxial - 0.28*aftShroud + 0.18*fwdShell + 0.55*tBreath - 281.64
#print "Flat model %10.2f microns <br>" % (N.mean(flatModel[8:]))
if l > 9:
P.plot(timeAxis2[8:],focusModel[8:], '-bo')
#P.plot(hours[8:], Bely[8:], '-g+')
P.title(camera + ' Model ' + date + '/' + yearString)
P.xlabel('Time of day')
P.ylabel('Focus position in microns')
P.grid(True)
ax = P.gca()
period = hours[-1]-hours[8]
stepSize = int(period/6)+1
hrs = MD.HourLocator(interval=stepSize)
ax.xaxis.set_major_locator(hrs)
hrFormat = MD.DateFormatter('%H:00')
ax.xaxis.set_major_formatter(hrFormat)
mins = MD.MinuteLocator(range(0,60,10))
if period < 0.33: mins = MD.MinuteLocator(range(0,60,5)) # if less than 20 minutes mark each 5-minute
ax.xaxis.set_minor_locator(mins)
if period < 1.0: # if period less than 1 hour label each 10 minutes
minFormat = MD.DateFormatter('%H:%M')
ax.xaxis.set_minor_formatter(minFormat)
firstMinute = 60*startHour + 10.0*floor(startMinute/10.0) # Round down to earlier 10-minute
lastMinute = 60*stopHour + 10.0*ceil(stopMinute/10.0) # Round up to next 10-minute
firstTime = int(julian[8])-40587 + firstMinute/1440.0
lastTime = int(julian[-1])-40587 + lastMinute/1440.0
P.xlim(firstTime,lastTime)
else:
ax.xaxis.set_minor_formatter(MT.NullFormatter())
firstTime = int(julian[8])-40587 + startHour/24.0
lastTime = int(julian[-1])-40587 + (stopHour+1)/24.0
P.xlim(firstTime,lastTime)
if output == 'Model' :
P.savefig('focusplot.png')
print "<p><img src='../focusplot.png'/></p>"
# Make up an output file
outfile = './plotdata' + dateStamp + '.txt'
op = open(outfile,'w')
op.write('Julian Date Date Time Model Flat Model\n')
for r in range(8,l):
dataString1 = '%12.6f' %jtime[r]
dataString2 = timeAxis[r].strftime(' %b %d %Y %H:%M:%S')
dataString3 = '%8.4f %8.4f \n'% (focusModel[r], flatModel[r])
op.write(dataString1 + dataString2 + dataString3)
t = timeAxis[r]
op.close()
#print "Plot file may be found at <a href = '../focusplot.png'> Plot </a> <br>"
#print "Data file may be found at <a href = '../%s'> Output </a> <br>" %outfile
return
def toJulian(year,month,day) :
'''Use time functions'''
dateString = str(year) + ' ' + str(month) + ' ' + str(day) + ' UTC'
tup = time.strptime(dateString, '%Y %m %d %Z')
sec = time.mktime(tup)
days = (sec-time.timezone)/86400.0 # Cancel time zone correction
jday = days + 40587 # 40587 = Modified Julian Date of Jan 1 1970
return jday
def fromJulian(j):
days = j-40587 # From Jan 1 1970
sec = days*86400.0
tup = time.gmtime(sec)
return tup
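# A minimal round-trip sketch (the example date is arbitrary and assumes the
# local-clock offset cancels as handled in toJulian): toJulian returns a
# Modified Julian Date, and MJD 40587 corresponds to 1970-01-01, so
#
#   >>> jd = toJulian(2009, 7, 20)
#   >>> fromJulian(jd)[:3]
#   (2009, 7, 20)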
| bsd-2-clause |
xcgoner/dist-mxnet | example/svm_mnist/svm_mnist.py | 44 | 4094 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#############################################################
## Please read the README.md document for better reference ##
#############################################################
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.decomposition import PCA
# import matplotlib.pyplot as plt
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Network declaration as symbols. The following pattern was based
# on the article, but feel free to play with the number of nodes
# and with the activation function
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=512)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 512)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
# Here we add the ultimate layer based on L2-SVM objective
mlp = mx.symbol.SVMOutput(data=fc3, name='svm')
# To use L1-SVM objective, comment the line above and uncomment the line below
# mlp = mx.symbol.SVMOutput(data=fc3, name='svm', use_linear=True)
# Now we fetch MNIST dataset, add some noise, as the article suggests,
# permute and assign the examples to be used on our network
mnist = fetch_mldata('MNIST original')
mnist_pca = PCA(n_components=70).fit_transform(mnist.data)
noise = np.random.normal(size=mnist_pca.shape)
mnist_pca += noise
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist_pca.shape[0])
X = mnist_pca[p]
Y = mnist.target[p]
X_show = mnist.data[p]
# This is just to normalize the input and separate train set and test set
X = X.astype(np.float32)/255
X_train = X[:60000]
X_test = X[60000:]
X_show = X_show[60000:]
Y_train = Y[:60000]
Y_test = Y[60000:]
# Article's suggestion on batch size
batch_size = 200
train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size, label_name='svm_label')
test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size, label_name='svm_label')
# Here we instantiate and fit the model for our data
# The article actually suggests using 400 epochs,
# But I reduced to 10, for convenience
mod = mx.mod.Module(
context = mx.cpu(0), # Run on CPU 0
symbol = mlp, # Use the network we just defined
label_names = ['svm_label'],
)
mod.fit(
train_data=train_iter,
eval_data=test_iter, # Testing data set. MXNet computes scores on test set every epoch
batch_end_callback = mx.callback.Speedometer(batch_size, 200), # Logging module to print out progress
num_epoch = 10, # Train for 10 epochs
optimizer_params = {
'learning_rate': 0.1, # Learning rate
'momentum': 0.9, # Momentum for SGD with momentum
'wd': 0.00001, # Weight decay for regularization
},
)
# Uncomment to view an example
# plt.imshow((X_show[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')
# plt.show()
# print 'Result:', model.predict(X_test[0:1])[0].argmax()
# Now it prints how good did the network did for this configuration
print('Accuracy:', mod.score(test_iter, mx.metric.Accuracy())[0][1]*100, '%')
| apache-2.0 |
btabibian/scikit-learn | sklearn/feature_extraction/hashing.py | 5 | 6830 | # Author: Lars Buitinck
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
alternate_sign : boolean, optional, default True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
non_negative : boolean, optional, default False
When True, an absolute value is applied to the features matrix prior to
returning it. When used in conjunction with alternate_sign=True, this
significantly reduces the inner product preservation property.
.. deprecated:: 0.19
This option will be removed in 0.21.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, alternate_sign=True, non_negative=False):
self._validate_params(n_features, input_type)
if non_negative:
warnings.warn("the option non_negative=True has been deprecated"
" in 0.19 and will be removed"
" in version 0.21.", DeprecationWarning)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.alternate_sign = alternate_sign
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype,
self.alternate_sign)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
QuantCrimAtLeeds/PredictCode | open_cp/scripted/analysis.py | 1 | 9254 | """
analysis.py
~~~~~~~~~~~
Various routines to perform standard analysis, and/or visualisation, tasks.
"""
import matplotlib.pyplot as _plt
import matplotlib.collections as _mpl_collections
import descartes as _descartes
import csv as _csv
import collections as _collections
import scipy.stats as _stats
import open_cp.plot as _plot
import numpy as _np
def _add_outline(loaded, ax):
p = _descartes.PolygonPatch(loaded.geometry, fc="none", ec="black")
ax.add_patch(p)
ax.set_aspect(1)
PredictionKey = _collections.namedtuple("PredictionKey", "name details")
def _split_by_comma_not_in_brackets(name):
bracket_count = 0
out = ""
for c in name:
if c == "(":
bracket_count += 1
elif c == ")":
bracket_count -= 1
if c == ",":
if bracket_count == 0:
yield out
out = ""
else:
out += c
else:
out += c
yield out
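# A minimal sketch (the key string is made-up): commas nested inside brackets
# do not split the string, e.g.
#
#   >>> list(_split_by_comma_not_in_brackets("Weight=Classic(sb=400, tb=8), DistanceUnit=150"))
#   ['Weight=Classic(sb=400, tb=8)', ' DistanceUnit=150']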
def parse_key_details(details):
"""Take a dictionary of "details", as returned by
:func:`parse_prediction_key`, and splits recursively into dictionaries.
"""
out = {}
for k,v in details.items():
try:
name, dets = parse_prediction_key(v)
out[k] = {name:dets}
except:
out[k] = v
return out
def parse_prediction_key(key):
"""The "name" or "key" of a predictor is assumed to be like:
`ProHotspotCtsProvider(Weight=Classic(sb=400, tb=8), DistanceUnit=150)`
Parse this into a :class:`PredictionKey` instance, where
- `name` == "ProHotspotCtsProvider"
- `details` will be the dict: {"Weight" : "Classic(sb=400, tb=8)",
"DistanceUnit" : 150}
(Attempts to parse to ints or floats if possible).
"""
if "(" not in key:
return PredictionKey(key, {})
i = key.index("(")
name = key[:i].strip()
dets = key[i+1:-1]
dets = [x.strip() for x in _split_by_comma_not_in_brackets(dets)]
details = {}
for x in dets:
if "=" not in x:
key, value = x, None
else:
i = x.index("=")
key = x[:i].strip()
value = x[i+1:].strip()
try:
value = int(value)
except ValueError:
pass
if isinstance(value, str):
try:
value = float(value)
except ValueError:
pass
details[key] = value
return PredictionKey(name, details)
def plot_prediction(loaded, prediction, ax):
"""Visualise a single prediction.
:param loaded: Instance of :class:`Loader`
:param prediction: The prediction to plot
:param ax: `matplotlib` Axis object to draw to.
"""
_add_outline(loaded, ax)
m = ax.pcolor(*prediction.mesh_data(), prediction.intensity_matrix, cmap="Greys")
_plt.colorbar(m, ax=ax)
def _set_standard_limits(loaded, ax):
xmin, ymin, xmax, ymax = loaded.geometry.bounds
d = max(xmax - xmin, ymax - ymin) / 20
ax.set(xlim=[xmin-d, xmax+d], ylim=[ymin-d, ymax+d])
def plot_data_scatter(loaded, ax):
"""Produce a scatter plot of the input data.
:param loaded: Instance of :class:`Loader`
:param ax: `matplotlib` Axis object to draw to.
"""
_add_outline(loaded, ax)
ax.scatter(*loaded.timed_points.coords, marker="x", linewidth=1, color="black", alpha=0.5)
_set_standard_limits(loaded, ax)
def plot_data_grid(loaded, ax):
"""Produce a plot of masked grid we used.
:param loaded: Instance of :class:`Loader`
:param ax: `matplotlib` Axis object to draw to.
"""
_add_outline(loaded, ax)
pc = _mpl_collections.PatchCollection(_plot.patches_from_grid(loaded.grid),
facecolors="none", edgecolors="black")
ax.add_collection(pc)
_set_standard_limits(loaded, ax)
def _open_text_file(filename, callback):
need_close = False
if isinstance(filename, str):
file = open(filename, "rt", newline="")
need_close = True
else:
file = filename
try:
return callback(file)
finally:
if need_close:
file.close()
def hit_counts_to_beta(csv_file):
"""Using the data from the csv_file, return the beta distributed posterior
given the hit count data. This gives an indication of the "hit rate" and
its variance.
:param csv_file: Filename to load, or file-like object
:return: Dictionary from prediction name to dictionary from coverage level
to a :class:`scipy.stats.beta` instance.
"""
def func(file):
reader = _csv.reader(file)
header = next(reader)
if header[:4] != ["Predictor", "Start time", "End time" ,"Number events"]:
raise ValueError("Input file is not from `HitCountSave`")
coverages = [int(x[:-1]) for x in header[4:]]
counts = _collections.defaultdict(int)
hits = _collections.defaultdict(lambda : _collections.defaultdict(int))
for row in reader:
name = row[0]
counts[name] += int(row[3])
for cov, value in zip(coverages, row[4:]):
hits[name][cov] += int(value)
betas = {name : dict() for name in counts}
for name in counts:
for cov in coverages:
a = hits[name][cov]
b = counts[name] - a
betas[name][cov] = _stats.beta(a, b)
return betas
return _open_text_file(csv_file, func)
def single_hit_counts_to_beta(hit_counts):
"""Convert a dictionary of hit_counts to beta distributed posteriors.
:param hit_counts: Dictionary from arbitrary keys to dictionarys from
coverage level to pairs `(hit_count, total_count)`.
:return: Dictionary from coverage levels to :class:`scipy.stats.beta`
instances.
"""
total_counts = {}
for key, cov_to_counts in hit_counts.items():
for cov, (hit, total) in cov_to_counts.items():
if cov not in total_counts:
total_counts[cov] = (0, 0)
total_counts[cov] = total_counts[cov][0] + hit, total_counts[cov][1] + total
return {k : _stats.beta(a, b-a) for k, (a,b) in total_counts.items()}
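# A minimal sketch (keys and counts are made-up): hits and totals are pooled
# per coverage level before fitting the beta posterior, e.g.
#
#   >>> counts = {'run1': {10: (3, 20)}, 'run2': {10: (5, 20)}}
#   >>> single_hit_counts_to_beta(counts)[10].mean()
#   0.2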
def plot_betas(betas, ax, coverages=None, plot_sds=True):
"""Plot hit rate curves using the data from :func:`hit_counts_to_beta`.
Plots the median and +/-34% (roughly a +/- 1 standard deviation) of the
posterior estimate of the hit-rate probability.
:param betas: Dict as from :func:`hit_counts_to_beta`.
:param ax: `matplotlib` Axis object to draw to.
:param coverages: If not `None`, plot only these coverages.
:param plot_sds: If `False` then omit the "standard deviation" ranges.
"""
if coverages is not None:
coverages = list(coverages)
for name, data in betas.items():
if coverages is None:
x = _np.sort(list(data))
else:
x = _np.sort(coverages)
y = [data[xx].ppf(0.5) for xx in x]
ax.plot(x,y,label=name)
if plot_sds:
y1 = [data[xx].ppf(0.5 - 0.34) for xx in x]
y2 = [data[xx].ppf(0.5 + 0.34) for xx in x]
ax.fill_between(x,y1,y2,alpha=0.5)
ax.legend()
ax.set(xlabel="Coverage (%)", ylabel="Hit rate (probability)")
def _mean_or_zero(beta_dist):
if beta_dist.args[0] == 0:
return 0
return beta_dist.mean()
def compute_betas_means_against_max(betas, coverages=None):
"""Compute hit rate curves using the data from :func:`hit_counts_to_beta`.
We use the mean "hit rate" and normalise against the maximum hit rate
at that coverage from any prediction.
:param betas: Dict as from :func:`hit_counts_to_beta`.
:param coverages: If not `None`, plot only these coverages.
:return: Pair `(x, d)` where `x` is the coverage values used, and
`d` is a dictionary from `betas` to list of y values.
"""
if coverages is not None:
x = _np.sort(list(coverages))
else:
data = next(iter(betas.values()))
x = _np.sort(list(data))
ycs = dict()
for name, data in betas.items():
ycs[name] = [_mean_or_zero(data[xx]) for xx in x]
maximum = [ max(ycs[k][i] for k in ycs) for i in range(len(x)) ]
def div_or_zero(y, m):
if m == 0:
return 0
return y / m
return x, {k : [div_or_zero(y, m) for y,m in zip(ycs[k], maximum)] for k in ycs}
def plot_betas_means_against_max(betas, ax, coverages=None):
"""Plot hit rate curves using the data from :func:`hit_counts_to_beta`.
We use the mean "hit rate" and normalise against the maximum hit rate
at that coverage from any prediction.
:param betas: Dict as from :func:`hit_counts_to_beta`.
:param ax: `matplotlib` Axis object to draw to.
:param coverages: If not `None`, plot only these coverages.
:return: Dictionary from keys of `betas` to list of y values.
"""
x, normed = compute_betas_means_against_max(betas, coverages)
for name, y in normed.items():
ax.plot(x,y,label=name)
ax.set(ylabel="Fraction of maximum hit rate", xlabel="Coverage (%)")
return normed
| artistic-2.0 |
WillieMaddox/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
    >>> a = [1, 2, 3]
    >>> print([x + 3 for x in a])
    [4, 5, 6]
    >>> print("a\n\nb")
a
b
"""
pass
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.21/_downloads/a5d4e64d0843ff17526c0588f9967f97/plot_covariance_whitening_dspm.py | 16 | 6905 | """
.. _ex-covariance-whitening-dspm:
===================================================
Demonstrate impact of whitening on source estimates
===================================================
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in
:footcite:`EngemannGramfort2015`. This example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in
:footcite:`EngemannGramfort2015`.
This example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
.. warning:: Please do not copy the patterns presented here for your own
             analysis; this example is purely illustrative.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
##############################################################################
# Get data
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw.crop(0, 150.).pick_types(meg=True, stim=True, exclude='bads').load_data()
raw.filter(None, 20.)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(mag=3e-12)
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
clim = dict(kind='value', lims=[0, 2.5, 5])
###############################################################################
# Estimate covariances
samples_epochs = 5, 15,
method = 'empirical', 'shrunk'
colors = 'steelblue', 'red'
epochs = mne.Epochs(
raw, events, event_ids, tmin, tmax,
baseline=baseline, preload=True, reject=reject, decim=8)
del raw
noise_covs = list()
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
idx = np.sort(np.concatenate([
np.where(epochs.events[:, 2] == event_ids[cond])[0][:n_train]
for cond in conditions]))
epochs_train = epochs[idx]
epochs_train.equalize_event_counts(event_ids)
assert len(epochs_train) == 2 * n_train
# We know some of these have too few samples, so suppress warning
# with verbose='error'
noise_covs.append(compute_covariance(
epochs_train, method=method, tmin=None, tmax=0, # baseline only
return_estimators=True, rank=None, verbose='error')) # returns list
# prepare contrast
evokeds.append([epochs_train[k].average() for k in conditions])
del epochs_train
del epochs
# Make forward
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
# oct5 and add_dist are just for speed, not recommended in general!
src = mne.setup_source_space(
'spm', spacing='oct5', subjects_dir=data_path + '/subjects',
add_dist=False)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(evokeds[0][0].info, trans, src, bem)
del src
for noise_covs_, evokeds_ in zip(noise_covs, evokeds):
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs_:
inverse_operator = make_inverse_operator(
evokeds_[0].info, forward, cov, loose=0.2, depth=0.8)
assert len(inverse_operator['sing']) == 274 # sanity check
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds_)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, cov, stc, stc_a, stc_b
del forward, noise_covs, evokeds # save some memory
##############################################################################
# Show the resulting source estimates
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 5))
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim,
initial_time=0.175, background='w', foreground='k')
brain.show_view('ven')
im = brain.screenshot()
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set(title='{0} epochs'.format(n_train * 2),
xlabel='Time (ms)', ylabel='Source Activation (dSPM)',
xlim=(tmin * 1e3, tmax * 1e3), ylim=(-3, 3))
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.2, left=0.01, right=0.99, wspace=0.03)
###############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
mcdeoliveira/pyctrl | examples/simulated_motor_2.py | 3 | 3579 | #!/usr/bin/env python3
def main():
# import python's standard math module and numpy
import math, numpy, sys
# import Controller and other blocks from modules
from pyctrl.timer import Controller
from pyctrl.block import Interp, Logger, Constant
from pyctrl.block.system import System, Differentiator
from pyctrl.system.tf import DTTF, LPF
# initialize controller
Ts = 0.01
simotor = Controller(period = Ts)
# build interpolated input signal
ts = [0, 1, 2, 3, 4, 5, 5, 6]
us = [0, 0, 100, 100, -50, -50, 0, 0]
# add pwm signal
simotor.add_signal('pwm')
# add filter to interpolate data
simotor.add_filter('input',
Interp(xp = us, fp = ts),
['clock'],
['pwm'])
# Motor model parameters
tau = 1/55 # time constant (s)
g = 0.092 # gain (cycles/sec duty)
c = math.exp(-Ts/tau)
d = (g*Ts)*(1-c)/2
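    # Note (added comment; one plausible reading of the coefficients above, not
    # stated in the original source): the DTTF block below realises a discrete
    # pwm -> encoder-position model. The speed dynamics are a first-order lag
    # with time constant tau and DC gain g whose pole is mapped exactly via
    # c = exp(-Ts/tau); the position integrator is discretised with the
    # trapezoidal (Tustin) rule, and one extra sample of delay is included,
    # which gives H(z) = d*(z^-1 + z^-2) / (1 - (1 + c) z^-1 + c z^-2)
    # with d = g*Ts*(1 - c)/2, matching the arrays passed to DTTF.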
# add motor signals
simotor.add_signal('encoder')
# add motor filter
simotor.add_filter('motor',
System(model = DTTF(
numpy.array((0, d, d)),
numpy.array((1, -(1 + c), c)))),
['pwm'],
['encoder'])
# add motor speed signal
simotor.add_signal('speed')
# add motor speed filter
simotor.add_filter('speed',
Differentiator(),
['clock','encoder'],
['speed'])
# add low-pass signal
simotor.add_signal('fspeed')
# add low-pass filter
simotor.add_filter('LPF',
System(model = LPF(fc = 5, period = Ts)),
['speed'],
['fspeed'])
# add logger
simotor.add_sink('logger',
Logger(),
['clock','pwm','encoder','speed','fspeed'])
# Add a timer to stop the controller
simotor.add_timer('stop',
Constant(value = 0),
None, ['is_running'],
period = 6, repeat = False)
# print controller info
print(simotor.info('all'))
try:
# run the controller
print('> Run the controller.')
with simotor:
# wait for the controller to finish on its own
simotor.join()
print('> Done with the controller.')
except KeyboardInterrupt:
pass
finally:
pass
# read logger
data = simotor.get_sink('logger', 'log')
try:
# import matplotlib
import matplotlib.pyplot as plt
except:
print('! Could not load matplotlib, skipping plots')
sys.exit(0)
print('> Will plot')
try:
# start plot
plt.figure()
except:
print('! Could not plot graphics')
print('> Make sure you have a connection to a windows manager')
sys.exit(0)
# plot pwm
ax1 = plt.gca()
ax1.plot(data['clock'], data['pwm'],'g', label='pwm')
ax1.set_ylabel('pwm (%)')
ax1.set_ylim((-60,120))
ax1.grid()
plt.legend(loc = 2)
# plot velocity
ax2 = plt.twinx()
ax2.plot(data['clock'], data['speed'],'b', label='speed')
ax2.plot(data['clock'], data['fspeed'], 'r', label='fspeed')
ax2.set_ylabel('speed (Hz)')
ax2.set_ylim((-6,12))
ax2.set_xlim(0,6)
ax2.grid()
plt.legend(loc = 1)
# show plots
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
georgid/SourceFilterContoursMelody | smstools/software/transformations_interface/hpsMorph_function.py | 24 | 7354 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile1='../../sounds/violin-B3.wav', window1='blackman', M1=1001, N1=1024, t1=-100,
minSineDur1=0.05, nH=60, minf01=200, maxf01=300, f0et1=10, harmDevSlope1=0.01, stocf=0.1,
inputFile2='../../sounds/soprano-E4.wav', window2='blackman', M2=901, N2=1024, t2=-100,
minSineDur2=0.05, minf02=250, maxf02=500, f0et2=10, harmDevSlope2=0.01):
"""
Analyze two sounds with the harmonic plus stochastic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics
minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound
f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
stocf: decimation factor used for the stochastic approximation
returns inputFile: input file name; fs: sampling rate of input file,
hfreq, hmag: harmonic frequencies, magnitude; stocEnv: stochastic residual
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sounds
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
# compute analysis windows
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
# compute the harmonic plus stochastic models
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram stochastic component of sound 1
plt.subplot(2,1,1)
numFrames = int(stocEnv1[:,0].size)
sizeEnv = int(stocEnv1[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs1)
binFreq = (.5*fs1)*np.arange(sizeEnv*maxplotfreq/(.5*fs1))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv1[:,:sizeEnv*maxplotfreq/(.5*fs1)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram of sound 1
if (hfreq1.shape[1] > 0):
harms = np.copy(hfreq1)
harms = harms*np.less(harms,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs1)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram of sound 1')
# plot spectrogram stochastic component of sound 2
plt.subplot(2,1,2)
numFrames = int(stocEnv2[:,0].size)
sizeEnv = int(stocEnv2[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs2)
binFreq = (.5*fs2)*np.arange(sizeEnv*maxplotfreq/(.5*fs2))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv2[:,:sizeEnv*maxplotfreq/(.5*fs2)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram of sound 2
if (hfreq2.shape[1] > 0):
harms = np.copy(hfreq2)
harms = harms*np.less(harms,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs2)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram of sound 2')
plt.tight_layout()
plt.show(block=False)
return inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2
def transformation_synthesis(inputFile1, fs, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2,
hfreqIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1]), hmagIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1]), stocIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1])):
"""
Transform the analysis values returned by the analysis function and synthesize the sound
inputFile1: name of input file 1
fs: sampling rate of input file 1
hfreq1, hmag1, stocEnv1: hps representation of sound 1
inputFile2: name of input file 2
hfreq2, hmag2, stocEnv2: hps representation of sound 2
hfreqIntp: interpolation factor between the harmonic frequencies of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
hmagIntp: interpolation factor between the harmonic magnitudes of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
stocIntp: interpolation factor between the stochastic representation of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# morph the two sounds
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)
# synthesis
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile1)[:-4] + '_hpsMorph.wav'
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram of transformed stochastic compoment
plt.subplot(2,1,1)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
if (yhfreq.shape[1] > 0):
harms = np.copy(yhfreq)
harms = harms*np.less(harms,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(2,1,2)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# analysis
inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2 = analysis()
# transformation and synthesis
transformation_synthesis (inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2)
plt.show()
| gpl-3.0 |
pearsonlab/nipype | doc/conf.py | 6 | 8363 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set fileencoding=utf-8 ft=python sts=4 ts=4 sw=4 et:
#
# nipype documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 20 12:30:18 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
nipypepath = os.path.abspath('..')
sys.path.insert(1, nipypepath)
import nipype
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.graphviz',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nipype'
copyright = u'2009-15, Neuroimaging in Python team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = nipype.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y, %H:%M PDT'
# List of documents that shouldn't be included in the build.
unused_docs = ['api/generated/gen']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Sphinxext configuration ---------------------------------------------------
# Set attributes for layout of inheritance diagrams
inheritance_graph_attrs = dict(rankdir="LR", size='"6.0, 8.0"', fontsize=14,
ratio='compress')
inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
color='dodgerblue1', style='filled')
# Flag to show todo items in rendered output
todo_include_todos = True
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'nipype.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'nipy pipeline and interfaces package'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
html_index = 'index.html'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['gse.html', 'localtoc.html', 'sidebar_versions.html', 'indexsidebar.html'],
'searchresults': ['sidebar_versions.html', 'indexsidebar.html'],
'version': []}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'nipypedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('documentation', 'nipype.tex', u'nipype Documentation',
u'Neuroimaging in Python team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
exclude_patterns = ['interfaces/generated/gen.rst', 'api/generated/gen.rst']
| bsd-3-clause |
initNirvana/Easyphotos | env/lib/python3.4/site-packages/IPython/config/loader.py | 4 | 29399 | # encoding: utf-8
"""A simple configuration system."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import copy
import logging
import os
import re
import sys
import json
from ast import literal_eval
from IPython.utils.path import filefind, get_ipython_dir
from IPython.utils import py3compat
from IPython.utils.encoding import DEFAULT_ENCODING
from IPython.utils.py3compat import unicode_type, iteritems
from IPython.utils.traitlets import HasTraits, List, Any
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in iteritems(other):
if k not in self:
to_update[k] = copy.deepcopy(v)
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = copy.deepcopy(v)
self.update(to_update)
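        # Illustration (added comment, not from the original source): merging
        #   Config({'A': Config({'x': 1})}) into Config({'A': Config({'y': 2})})
        # recursively combines section A into {'x': 1, 'y': 2}, while a plain
        # (non-Config) value under an existing key is simply overwritten by the
        # incoming value.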
def collisions(self, other):
"""Check for collisions between two config objects.
Returns a dict of the form {"Class": {"trait": "collision message"}}`,
indicating which values have been ignored.
An empty dict indicates no collisions.
"""
collisions = {}
for section in self:
if section not in other:
continue
mine = self[section]
theirs = other[section]
for key in mine:
if key in theirs and mine[key] != theirs[key]:
collisions.setdefault(section, {})
collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
return collisions
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
    def copy(self):
        new_config = type(self)(dict.copy(self))
        # copy nested config objects
        for k, v in new_config.items():
            if isinstance(v, Config):
                new_config[k] = v.copy()
        return new_config
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
new_config = type(self)()
for key, value in self.items():
if isinstance(value, (Config, LazyConfigValue)):
# deep copy config objects
value = copy.deepcopy(value, memo)
elif type(value) in {dict, list, set, tuple}:
# shallow copy plain container traits
value = copy.copy(value)
new_config[key] = value
return new_config
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
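    # Behaviour sketch (added comment for clarity, not in the original source):
    # looking up an undefined section key such as c.InteractiveShell auto-creates
    # an empty Config, while an undefined lowercase key returns a LazyConfigValue
    # proxy so that container methods (append/extend/update) can be recorded
    # before the real value exists; keys starting with '_' still raise KeyError.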
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
A :class:`ConfigLoader` does one thing: load a config from a source
(file, command line arguments) and returns the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from IPython.utils.log import get_logger
return get_logger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
            By default the logger of :meth:`IPython.config.application.Application.instance()`
will be used
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
self.log.warn("Unrecognized JSON config file version, assuming version {}".format(version))
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
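# Example of a JSON config file accepted by JSONFileConfigLoader (illustrative,
# not from the original source):
#     {"version": 1, "TerminalIPythonApp": {"display_banner": false}}
# The "version" key is popped off and the remaining sections become a Config.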
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
# This closure is made available in the namespace that is used
# to exec the config file. It allows users to call
# load_subconfig('myconfig.py') to load config files recursively.
# It needs to be a closure because it has references to self.path
# and self.config. The sub-config is loaded with the same path
# as the parent, but it uses an empty config which is then merged
# with the parents.
# If a profile is specified, the config file will be loaded
# from that profile
def load_subconfig(fname, profile=None):
# import here to prevent circular imports
from IPython.core.profiledir import ProfileDir, ProfileDirError
if profile is not None:
try:
profile_dir = ProfileDir.find_profile_dir_by_name(
get_ipython_dir(),
profile,
)
except ProfileDirError:
return
path = profile_dir.location
else:
path = self.path
loader = PyFileConfigLoader(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there. This happens
                # when a user is using a profile, but not the default config.
pass
else:
self.config.merge(sub_config)
# Again, this needs to be a closure and should be used in config
# files to get the config being loaded.
def get_config():
return self.config
namespace = dict(
load_subconfig=load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with literal_eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = literal_eval(rhs)
except (NameError, SyntaxError, ValueError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
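    # Illustration of the rules above (added comment, not in the original source):
    #   lhs='C.a', rhs='4'      -> literal_eval yields the int 4
    #   lhs='C.a', rhs='foobar' -> literal_eval fails, so the string 'foobar' is kept
    #   rhs='~/data'            -> expanded with os.path.expanduser before evaluation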
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in iteritems(cfg):
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
This allows command line options to be gives in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
            A dict of flags, keyed by str name. Values can be Config objects,
dicts, or "key=value" strings. If Config or dict, when the flag
is triggered, The flag is loaded as `self.config.update(m)`.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from IPython.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stdin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
or dicts. When the flag is triggered, The config is loaded as
`self.config.update(cfg)`.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warn("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
A tuple of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
args : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in iteritems(vars(self.parsed_data)):
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in iteritems(aliases):
if key in flags:
# flags
nargs = '?'
else:
nargs = None
            if len(key) == 1:
paa('-'+key, '--'+key, type=unicode_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=unicode_type, dest=value, nargs=nargs)
for key, (value, help) in iteritems(flags):
if key in self.aliases:
#
self.alias_flags[self.aliases[key]] = value
continue
            if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in iteritems(vars(self.parsed_data)):
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
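# Usage sketch (illustrative, not part of the original module):
#     config = load_pyconfig_files(['ipython_config.py', 'extra_config.py'],
#                                  '/path/to/profile')
# Files are loaded in order and combined with Config.merge, so values in later
# files win on collisions; files that cannot be found are silently skipped.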
| mit |
Nikea/scikit-xray | doc/sphinxext/plot_generator.py | 8 | 10150 | """
Sphinx plugin to run example scripts and create a gallery page.
Taken from seaborn project, which in turn was lightly
modified from the mpld3 project.
"""
from __future__ import division
import os
import os.path as op
import re
import glob
import token
import tokenize
import shutil
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import image
RST_TEMPLATE = """
.. _{sphinx_tag}:
{docstring}
.. image:: {img_file}
**Python source code:** :download:`[download source: {fname}]<{fname}>`
.. literalinclude:: {fname}
:lines: {end_line}-
"""
INDEX_TEMPLATE = """
.. raw:: html
<style type="text/css">
.figure {{
position: relative;
float: left;
margin: 10px;
width: 180px;
height: 200px;
}}
.figure img {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure:hover img {{
-webkit-filter: blur(3px);
-moz-filter: blur(3px);
-o-filter: blur(3px);
-ms-filter: blur(3px);
filter: blur(3px);
opacity:1.0;
filter:alpha(opacity=100); /* For IE8 and earlier */
}}
.figure span {{
position: absolute;
display: inline;
left: 0;
width: 170px;
height: 170px;
background: #000;
color: #fff;
visibility: hidden;
opacity: 0;
z-index: 100;
}}
.figure p {{
position: absolute;
top: 45%;
width: 170px;
font-size: 110%;
}}
.figure:hover span {{
visibility: visible;
opacity: .4;
}}
.caption {{
        position: absolute;
width: 180px;
top: 170px;
text-align: center !important;
}}
</style>
.. _{sphinx_tag}:
Example gallery
===============
{toctree}
{contents}
.. raw:: html
<div style="clear: both"></div>
"""
def create_thumbnail(infile, thumbfile,
width=300, height=300,
cx=0.5, cy=0.5, border=4):
baseout, extout = op.splitext(thumbfile)
im = image.imread(infile)
rows, cols = im.shape[:2]
x0 = int(cx * cols - .5 * width)
y0 = int(cy * rows - .5 * height)
xslice = slice(x0, x0 + width)
yslice = slice(y0, y0 + height)
thumb = im[yslice, xslice]
thumb[:border, :, :3] = thumb[-border:, :, :3] = 0
thumb[:, :border, :3] = thumb[:, -border:, :3] = 0
dpi = 100
fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], aspect='auto',
frameon=False, xticks=[], yticks=[])
ax.imshow(thumb, aspect='auto', resample=True,
interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
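# Note (added comment): cx and cy are fractional positions in the source image,
# so the defaults crop a width x height window centred on the image; the first
# and last `border` rows/columns of the crop are blacked out to draw a thin frame.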
def indent(s, N=4):
"""indent a string"""
return s.replace('\n', '\n' + N * ' ')
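# e.g. indent("a\nb\nc") -> "a\n    b\n    c": only the lines after the first
# receive the N-space prefix (comment added for clarity).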
class ExampleGenerator(object):
"""Tools for generating an example page from a file"""
def __init__(self, filename, target_dir):
self.filename = filename
self.target_dir = target_dir
self.thumbloc = .5, .5
self.extract_docstring()
with open(filename, "r") as fid:
self.filetext = fid.read()
outfilename = op.join(target_dir, self.rstfilename)
# Only actually run it if the output RST file doesn't
# exist or it was modified less recently than the example
if (not op.exists(outfilename)
or (op.getmtime(outfilename) < op.getmtime(filename))):
self.exec_file()
else:
print("skipping {0}".format(self.filename))
@property
def dirname(self):
return op.split(self.filename)[0]
@property
def fname(self):
return op.split(self.filename)[1]
@property
def modulename(self):
return op.splitext(self.fname)[0]
@property
def pyfilename(self):
return self.modulename + '.py'
@property
def rstfilename(self):
return self.modulename + ".rst"
@property
def htmlfilename(self):
return self.modulename + '.html'
@property
def pngfilename(self):
pngfile = self.modulename + '.png'
return "_images/" + pngfile
@property
def thumbfilename(self):
pngfile = self.modulename + '_thumb.png'
return pngfile
@property
def sphinxtag(self):
return self.modulename
@property
def pagetitle(self):
return self.docstring.strip().split('\n')[0].strip()
@property
def plotfunc(self):
match = re.search(r"sns\.(.+plot)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+map)\(", self.filetext)
if match:
return match.group(1)
match = re.search(r"sns\.(.+Grid)\(", self.filetext)
if match:
return match.group(1)
return ""
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs,
# extract the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')
).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
thumbloc = None
for i, line in enumerate(docstring.split("\n")):
m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
if m:
thumbloc = float(m.group(1)), float(m.group(2))
break
if thumbloc is not None:
self.thumbloc = thumbloc
docstring = "\n".join([l for l in docstring.split("\n")
if not l.startswith("_thumb")])
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def exec_file(self):
print("running {0}".format(self.filename))
plt.close('all')
my_globals = {'pl': plt,
'plt': plt}
execfile(self.filename, my_globals)
fig = plt.gcf()
fig.canvas.draw()
pngfile = op.join(self.target_dir, self.pngfilename)
thumbfile = op.join("example_thumbs", self.thumbfilename)
self.html = "<img src=../%s>" % self.pngfilename
fig.savefig(pngfile, dpi=75, bbox_inches="tight")
cx, cy = self.thumbloc
create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)
def toctree_entry(self):
return " ./%s\n\n" % op.splitext(self.htmlfilename)[0]
def contents_entry(self):
return (".. raw:: html\n\n"
" <div class='figure align-center'>\n"
" <a href=./{0}>\n"
" <img src=../_static/{1}>\n"
" <span class='figure-label'>\n"
" <p>{2}</p>\n"
" </span>\n"
" </a>\n"
" </div>\n\n"
"\n\n"
"".format(self.htmlfilename,
self.thumbfilename,
self.plotfunc))
def main(app):
static_dir = op.join(app.builder.srcdir, '_static')
target_dir = op.join(app.builder.srcdir, 'examples')
image_dir = op.join(app.builder.srcdir, 'examples/_images')
thumb_dir = op.join(app.builder.srcdir, "example_thumbs")
source_dir = op.abspath(op.join(app.builder.srcdir,
'..', 'examples'))
if not op.exists(static_dir):
os.makedirs(static_dir)
if not op.exists(target_dir):
os.makedirs(target_dir)
if not op.exists(image_dir):
os.makedirs(image_dir)
if not op.exists(thumb_dir):
os.makedirs(thumb_dir)
if not op.exists(source_dir):
os.makedirs(source_dir)
banner_data = []
toctree = ("\n\n"
".. toctree::\n"
" :hidden:\n\n")
contents = "\n\n"
# Write individual example files
for filename in glob.glob(op.join(source_dir, "*.py")):
ex = ExampleGenerator(filename, target_dir)
banner_data.append({"title": ex.pagetitle,
"url": op.join('examples', ex.htmlfilename),
"thumb": op.join(ex.thumbfilename)})
shutil.copyfile(filename, op.join(target_dir, ex.pyfilename))
output = RST_TEMPLATE.format(sphinx_tag=ex.sphinxtag,
docstring=ex.docstring,
end_line=ex.end_line,
fname=ex.pyfilename,
img_file=ex.pngfilename)
with open(op.join(target_dir, ex.rstfilename), 'w') as f:
f.write(output)
toctree += ex.toctree_entry()
contents += ex.contents_entry()
if len(banner_data) < 10:
banner_data = (4 * banner_data)[:10]
# write index file
index_file = op.join(target_dir, 'index.rst')
with open(index_file, 'w') as index:
index.write(INDEX_TEMPLATE.format(sphinx_tag="example_gallery",
toctree=toctree,
contents=contents))
def setup(app):
app.connect('builder-inited', main)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
| bsd-3-clause |
neogis-de/PPPP_utilities | pointcloud/patch_to_raster.py | 2 | 8028 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 23 16:38:01 2015
@author: remi
"""
import pg_pointcloud_classes as pc
reload(pc)
def translatePointArray(pt_arr, schema, pixel_size):
"""this function simply extract X,Y,Z and translate x and y and compute and allocate pixel array"""
import numpy as np
import math
x_column_indice = schema.getNameIndex('X')
y_column_indice = schema.getNameIndex('Y')
z_column_indice = schema.getNameIndex('Z')
pt_xyz = pt_arr[:, [x_column_indice, y_column_indice, z_column_indice]]
#print pt_xyz.shape
#find min max of x and y
x_max = np.nanmax(pt_xyz[:, 0], axis=0)[0]
x_min = np.nanmin(pt_xyz[:, 0], axis=0)[0]
y_max = np.nanmax(pt_xyz[:, 1], axis=0)[0]
y_min = np.nanmin(pt_xyz[:, 1], axis=0)[0]
bottom_left = [x_min - (x_min % pixel_size), y_min - (y_min % pixel_size)]
#print x_max,x_min,y_max,y_min
#translate points
    #removing the grid-aligned minimum: floor(x_min, y_min) onto the pixel grid
pt_xyz[:, 0] -= x_min - (x_min % pixel_size)
pt_xyz[:, 1] -= y_min - (y_min % pixel_size)
x_max -= x_min - (x_min % pixel_size)
y_max -= y_min - (y_min % pixel_size)
#print pt_xyz[:,[0,1]] / 0.04
#find number of x_pixel, y_pixel
#this is ceil(x_max/pixel_size)
x_pix_number = math.ceil(x_max / pixel_size)
y_pix_number = math.ceil(y_max / pixel_size)
#print "pixel number on x :%s , y : %s" % (x_pix_number, y_pix_number)
#create a numpy array out of this
pixel_index_array = np.zeros([y_pix_number, x_pix_number], dtype=np.int)
pixel_index_array = pixel_index_array * float('NaN')
return pixel_index_array, pt_xyz, bottom_left
#del GD
def pointsToPixels(pixel_index_array, pt_xyz, pixel_size):
"""this function takes a list of points translated and assign the points index to a pixel array, depedning on
Z"""
#creating a temp Z buffer and an accum buffer
import numpy as np
import math
    z_buf = np.zeros(pixel_index_array.shape, dtype=np.float64)
    z_buf = (z_buf + 1) * float("inf")
    accum = np.zeros(pixel_index_array.shape, dtype=np.int32)
#print z_buf
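    # z-buffer pass: for each pixel keep the index of the point with the smallest Z (accum counts each buffer update)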
for i in range(0, pt_xyz.shape[0]):
#finding the pixel coordinates of this point floor(x/pixel_size)
        x_pixel_index = int(math.floor(pt_xyz[i, 0] / pixel_size))
        y_pixel_index = int(math.floor(pt_xyz[i, 1] / pixel_size))
if pt_xyz[i,2] < z_buf[y_pixel_index,x_pixel_index]:
accum[y_pixel_index,x_pixel_index] += 1
z_buf[y_pixel_index,x_pixel_index] = pt_xyz[i,2]
pixel_index_array[y_pixel_index,x_pixel_index] = i
return pixel_index_array, accum
def onePointToBandsArray(one_point, dim_name_index_dictionnary):
"""this is a custom function that will indicates how to compute the bands"""
import numpy as np
#print dim_name_index_dictionnary
dnd = dim_name_index_dictionnary
#for this application, we are interested in this :
#z-z_origin , reflectance, echo_range, deviation, accum
band_array = np.zeros(1,dtype = [
('relative_height',np.float32)
,('reflectance',np.float32)
#,('echo_range',np.float32)
#,('deviation',np.float32)
,('accumulation',np.uint16)
])
band_array[0][0] = one_point[dnd['z']] - one_point[dnd['z_origin']]
band_array[0][1] = one_point[dnd['reflectance']]
#band_array[0][2] = one_point[dnd['echo_range']]
#band_array[0][3] = one_point[dnd['deviation']]
band_array[0][2] = one_point[dnd['accumulation']]
return band_array
def print_matrix_band(matrix, band_name):
"""facility function to print one band of the matrix rperesenting the image"""
    import matplotlib
    import matplotlib.pyplot as plt
#plt.imshow(pixel_index_array, origin='lower')
plt.imshow(matrix[:][:][band_name], origin='lower', interpolation='none') # note : origin necessary to get the image in correct order
def constructing_image_matrix(pt_arr, pixel_index_array, accum, schema, onePointToBandsArray):
"""this functions takes the list of points, and the matrix of index,\
and the function to compute band, and create and fill the final image matrix"""
import numpy.ma as ma
import numpy as np
nameIndex = schema.getNamesIndexesDictionnary()
    # modify the name index to add an 'accumulation' entry at the last position
nameIndex['accumulation'] = pt_arr[0].shape
#creating an augmented point with added attribute 'accum'
augmented_point = np.append(pt_arr[0],accum[0,0] )
test_band = onePointToBandsArray(augmented_point, nameIndex)
    # test_band now gives the dtype of each band
    # construct the final image array: one structured multi-band value per pixel,
    # using the dtype returned by the custom band function
image_matrix = np.zeros(pixel_index_array.shape , dtype = test_band.dtype)
image_matrix = image_matrix.view(ma.MaskedArray)
image_matrix.mask = True
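    # fully masked by default; pixels that received a point are written (and thereby unmasked) below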
#setting the Nan value to min possible for int, or Nan for float
#filling this matrix with actual values
for x in range(0, image_matrix.shape[1]):
for y in range(0, image_matrix.shape[0]):
if np.isnan(pixel_index_array[y,x])==False:
                image_matrix[y, x] = onePointToBandsArray(
                    np.append(pt_arr[int(pixel_index_array[y, x])], accum[y, x]),
                    nameIndex)
#print_matrix_band(image_matrix,'reflectance')
return image_matrix
def patchToNumpyMatrix(pt_arr, schema, pixel_size):
"""main function converting a double array representing points to a matrix representing a multiband image"""
    import numpy_to_gdal as n2g
    # prepare the data structures for computation and translate the points
pixel_index_array, pt_xy, bottom_left = translatePointArray(pt_arr, schema, pixel_size)
#put points into pixel
pixel_index_array, accum = pointsToPixels(pixel_index_array, pt_xy, pixel_size)
image_matrix = constructing_image_matrix(pt_arr, pixel_index_array, accum, schema, onePointToBandsArray)
#creating an object to store all meta data
#band_name =
multi_band_image = n2g.numpy_multi_band_image()
print ' srtext : %s ' % schema.srtext
multi_band_image.setAttributes(\
image_matrix, bottom_left, pixel_size, image_matrix[0, 0].dtype.names, schema.srtext)
return multi_band_image
def testModule():
import numpy as np
import numpy_to_gdal as n2g
reload(n2g)
pixel_size = 0.04
pt_arr, schema = getTestPoints()
multi_band_image = patchToNumpyMatrix(pt_arr, schema, pixel_size)
print 'here is the array band %s' % multi_band_image.pixel_matrix[1,1]
#using the conversion to gdal
n2g.test_module(multi_band_image)
def getTestPoints():
import psycopg2 as psy
import pg_pointcloud_classes as pc
connection_string = """dbname=test_pointcloud user=postgres password=postgres port=5433"""
if 'GD' not in globals():
global GD
GD = {}
if 'rc' not in GD: # creating the rc dict if necessary
GD['rc'] = dict()
if 'schemas' not in GD['rc']: # creating the schemas dict if necessary
GD['rc']['schemas'] = dict()
#get a patch from base
conn = psy.connect(connection_string)
cur = conn.cursor()
cur.execute("""
SELECT gid, pc_uncompress(patch)
FROM --acquisition_tmob_012013.riegl_pcpatch_space
benchmark_cassette_2013.riegl_pcpatch_space
WHERE PC_NumPoints(patch) between 5000 and 10000
LIMIT 1
""");
result = cur.fetchone()
print 'patch found : %s'% result[0]
b_patch = result[1]
conn.commit()
cur.close()
conn.close()
#(pts_arr, (mschema,endianness, compression, npoints)) = pc.patch_string_buff_to_numpy(b_patch, GD['rc']['schemas'], connection_string)
pts_arr, schema = pc.WKB_patch_to_numpy_double(b_patch, GD['rc']['schemas'], connection_string)
return pts_arr, schema
import pg_pointcloud_classes as pc
pc.create_GD_if_not_exists()
testModule()
| lgpl-3.0 |
olinguyen/shogun | examples/undocumented/python/graphical/interactive_clustering_demo.py | 5 | 11271 | """
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from shogun import *
from shogun import *
from shogun import *
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'o', color='0.7')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'o', color='0.5')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
if event.button==1:
label = 1.0
else:
label = -1.0
self.data.add_example(event.xdata, event.ydata, label)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
self.k.setEnabled(True)
def train_svm(self):
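        # despite the method name, this runs KMeans clustering on the points entered so far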
k = int(self.k.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1_pos, self.data.x2_pos, 'ko')
self.axes.plot(self.data.x1_neg, self.data.x2_neg, 'ko')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = BinaryLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
distance_name = self.distance_combo.currentText()
if distance_name == "EuclideanDistance":
distance=EuclideanDistance(train, train)
elif distance_name == "ManhattanMetric":
distance=ManhattanMetric(train, train)
elif distance_name == "JensenMetric":
distance=JensenMetric(train, train)
kmeans=KMeans(k, distance)
kmeans.train()
centers = kmeans.get_cluster_centers()
radi=kmeans.get_radiuses()
self.axes.plot(features[0,labels==+1], features[1,labels==+1],'ro')
self.axes.plot(features[0,labels==-1], features[1,labels==-1],'bo')
for i in xrange(k):
self.axes.plot(centers[0,i],centers[1,i],'kx', markersize=20, linewidth=5)
t = numpy.linspace(0, 2*numpy.pi, 100)
self.axes.plot(radi[i]*numpy.cos(t)+centers[0,i],radi[i]*numpy.sin(t)+centers[1,i],'k-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
k_label = QLabel('Number of Clusters')
self.k = QLineEdit()
self.k.setText("2")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(k_label)
spins_hbox.addWidget(self.k)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Cluster!")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.distance_combo = QComboBox()
self.distance_combo.insertItem(-1, "EuclideanDistance")
self.distance_combo.insertItem(-1, "ManhattanMetric")
self.distance_combo.insertItem(-1, "JensenMetric")
self.distance_combo.maximumSize = QSize(300, 50)
self.connect(self.distance_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.distance_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1_pos = []
self.x2_pos = []
self.x1_neg = []
self.x2_neg = []
def get_stats(self):
num_neg = len(self.x1_neg)
num_pos = len(self.x1_pos)
str_neg = "num negative examples: %i" % num_neg
str_pos = "num positive examples: %i" % num_pos
return (str_neg, str_pos)
def get_labels(self):
return numpy.array([1]*len(self.x1_pos) + [-1]*len(self.x1_neg), dtype=numpy.float64)
def get_examples(self):
num_pos = len(self.x1_pos)
num_neg = len(self.x1_neg)
examples = numpy.zeros((2,num_pos+num_neg))
for i in xrange(num_pos):
examples[0,i] = self.x1_pos[i]
examples[1,i] = self.x2_pos[i]
for i in xrange(num_neg):
examples[0,i+num_pos] = self.x1_neg[i]
examples[1,i+num_pos] = self.x2_neg[i]
return examples
def add_example(self, x1, x2, label):
if label==1:
self.x1_pos.append(x1)
self.x2_pos.append(x2)
else:
self.x1_neg.append(x1)
self.x2_neg.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| gpl-3.0 |
domagalski/pocketcorr | scripts/pocketcorr_adc.py | 1 | 9505 | #!/usr/bin/env python2
################################################################################
## This script is for a simple ADC capture to test a pocket correlator.
## Copyright (C) 2014 Rachel Simone Domagalski: domagalski@berkeley.edu
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import sys
import argparse
import numpy as np
import pocketcorr as pc
from numpy import fft
#BRAM_SIZE = 4 << 11
BRAM_SIZE = 4 << 10
BRAM_WIDTH = 32
NBITS = 8
class ADC(pc.POCO):
def adc_read(self, start_pol, demux=1, capture='adc'):
"""
Read the time domain signals out of a BRAM.
"""
#XXX need to do demux2 eq blocks
# Read the register containing the ADC captures.
if demux == 2 and capture == 'pfb':
names = ['pfb_real', 'pfb_imag']
# XXX data type should be <i4 after recompile
real = np.fromstring(self.read(names[0], BRAM_SIZE), '>i4')
imag = np.fromstring(self.read(names[1], BRAM_SIZE), '>i4')
pfb_read = np.zeros(BRAM_SIZE / 4, dtype=np.complex64)
pfb_read.real = real# map(twos_comp, real)
pfb_read.imag = imag#map(twos_comp, imag)
return pfb_read
else:
read_size = BRAM_SIZE
nbits = demux*NBITS
npols = BRAM_WIDTH/nbits
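            # each BRAM word packs BRAM_WIDTH/nbits polarizations, so one capture block holds pols start_pol .. start_pol+npols-1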
first = str(start_pol)
last = str(start_pol + npols - 1)
adc = capture + '_'*int(demux>1)
# I feel one day I'm going to look back on this and shake my head.
if self.poco == 'spoco12':
if adc == 'pfb':
print 'ERROR: This is messed up on the FPGA.'
sys.exit(1)
elif adc == 'fft':
last = str(start_pol + 6)
read_size *= 2
npols /= 2
elif adc == 'eq':
last = str(start_pol + 1)
read_size *= 2
adc += '_cap_'
# There is a sync pulse somewhere in the data
if adc[:2] == 'eq' or adc[:3] == 'fft':
sync = self.read(adc + 'sync', read_size).find(chr(1)) / 4
concat = self.read(adc + '_'.join([first, last]), read_size)
# Get the formatting for the data.
if adc[:3] != 'fft':
shape = (read_size/(npols*demux), npols*demux)
fmt = '>i1'
else:
shape = (read_size/(npols*demux*2), npols*demux)
fmt = '>i2'
# Parse the data into usable values.
adc_read = np.fromstring(concat, fmt).reshape(*shape)
if adc[:2] == 'eq' or adc[:3] == 'fft':
adc_read = adc_read[sync:sync+adc_read.shape[0]/2]
adc_read = list(adc_read.transpose()[::-1])
if adc[:3] == 'fft':
adc_read = adc_read[0] + 1j*adc_read[1]
split = len(adc_read)/2
adc_read = [adc_read[:split], adc_read[split:]]
if demux == 2:
adc_read = [np.r_[adc_read[2*i],adc_read[2*i+1]]
for i in range(len(adc_read)/2)]
for i in range(len(adc_read)):
reordered = np.copy(adc_read[i]).reshape(2, shape[0])
reordered = reordered.transpose().flatten()
adc_read[i] = np.copy(reordered)
# Return the data as a dictionary.
if capture == 'adc_cap':
capture = 'adc'
names = [capture + str(i) for i in range(start_pol, start_pol+npols)]
if adc[:3] == 'fft':
names = [capture + str(i) for i in [start_pol, start_pol+6]]
return zip(names, adc_read)
def twos_comp(num32, nbits=18):
"""
    Perform the two's complement of an n-bit number.
"""
bit_sel = 2**nbits - 1
neg_bit = 1 << nbits - 1
num32 = num32 & bit_sel
if num32 & neg_bit:
return -(((1 << 32) - num32) & bit_sel)
else:
return num32
if __name__ == '__main__':
# Grab options from the command line
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--ip-roach', dest='roach', required=True,
help='Hostname/ip address of the ROACH.')
parser.add_argument('-N', '--npol',
default=8,
type=int,
help='Number of antennas in the rpoco design.')
parser.add_argument('-c', '--capture',
help='Block to capture from (SNAP only)')
parser.add_argument('-d', '--demux', default=1, type=int,
help='Demux mode of the ADC.')
parser.add_argument('-o', '--output-file',
dest='outfile',
help='NPZ file to save data to.')
parser.add_argument('-a', '--antennas', nargs='+',
help='Antennas to plot.')
parser.add_argument('-f', '--fft', action='store_true',
help='Run an FFT on the data.')
parser.add_argument('-S', '--samp-rate', default=200e6, type=float,
help='Samping rate of the ADC (for plots).')
args = parser.parse_args()
# Make sure that the user specified something to do.
if args.outfile is None and args.antennas is None:
print 'ERROR: Nothing to do.'
sys.exit(1)
if args.outfile is not None and args.antennas is None and args.fft:
print 'ERROR: This script only stores raw data.'
sys.exit(1)
# Connect to the ROACH.
poco = ADC(args.roach)
poco.wait_connected()
spoco12 = False
modelist = pc.mode_int2list(poco.read_int('ping'))
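    # a 12-input SNAP build uses the 'spoco12' register naming, which is handled specially below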
if modelist[0] == 'snap' and modelist[3] == 12:
spoco12 = True
poco.poco = 'spoco12'
if args.demux == 1 and args.capture is None:
if spoco12:
cap = 'adc'
else:
cap = 'new_raw'
else:
cap = args.capture
if spoco12: # See the else for description of the sequence
poco.write_int(cap + '_cap_raw_trig', 1)
poco.write_int(cap + '_cap_raw', 1)
poco.write_int(cap + '_cap_raw', 0)
poco.write_int(cap + '_cap_raw_trig', 1)
else:
# Enable the ADC capture
poco.write_int(cap + '_capture_trig', 1)
# capture the ADC.
poco.write_int(cap + '_capture', 1)
poco.write_int(cap + '_capture', 0)
# Turn off ADC capture.
poco.write_int(cap + '_capture_trig', 0)
# Collect data and store it as a dictionary
adc_capture = []
nbits = args.demux * NBITS
if cap == 'pfb' and not spoco12:
pfb_capture = poco.adc_read(0, args.demux, cap)
else:
npol = args.npol
step_size = BRAM_SIZE/nbits
if cap == 'eq' or cap == 'fft':
npol /= 2
step_size = 1
for i in range(0, npol, step_size):
adc_capture += poco.adc_read(i, args.demux, cap)
adc_capture = dict(adc_capture)
# Now we either save or plot the data.
if args.outfile is not None:
np.savez(args.outfile, **adc_capture)
# Set this for plotting
#if args.demux == 1 and args.capture is None:
# cap = 'adc'
if args.antennas is not None:
import matplotlib.pyplot as plt
if cap == 'pfb' and not spoco12:
plt.plot(np.abs(pfb_capture)**2)
else:
time_axis = np.arange(BRAM_SIZE*nbits/BRAM_WIDTH) * 1e6 / args.samp_rate
freq_axis = fft.fftfreq(len(time_axis), 1e6 / args.samp_rate)
freq_axis = freq_axis[:len(freq_axis)/2] # ADC data is real
for ant in args.antennas:
plt.figure()
name = cap + ant
sample = adc_capture[name]
if args.fft or cap == 'fft':
if args.fft:
pspec = np.abs(fft.fft(sample)[:len(sample)/2])**2
pspec = 10*np.log10(pspec / np.max(pspec))
plt.plot(freq_axis, pspec)
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (dB)')
else:
pspec = np.abs(sample)**2
plt.plot(pspec)
plt.xlabel('Frequency (chan)')
plt.ylabel('Power')
plt.title(name)
#plt.axis([0, freq_axis[-1], np.min(pspec), 0])
else:
plt.plot(time_axis, sample)
plt.xlabel('Time ($\mu s$)')
plt.ylabel('Amplitude (ADU)')
plt.title(name)
plt.axis([0, time_axis[-1], np.min(sample)-2, np.max(sample)+2])
plt.show()
| gpl-3.0 |