repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
alan-unravel/bokeh | bokeh/crossfilter/plotting.py | 42 | 8763 | from __future__ import absolute_import
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, BoxSelectTool
from ..plotting import figure
def cross(start, facets):
    """Extend every existing facet combination with each new facet.

    Computes the cross product of an initial set of facet combinations
    with a new set of facets.

    Args:
        start (list): list of lists, each an existing combination of facets
        facets (list): new facets to append to every existing combination

    Returns:
        list: a list of lists of unique combinations of facets
    """
    # Wrap each new facet in its own single-element list so that list
    # concatenation appends it to an existing combination.
    wrapped = [[facet] for facet in facets]
    return [combo + extra for combo in start for extra in wrapped]
def hide_axes(plot, axes=('x', 'y')):
    """Make the requested axes of the plot invisible.

    Visibility is removed by zeroing the alpha of every axis component and
    shrinking the label font to 0pt.

    Args:
        plot (Figure): a valid figure with x and y axes
        axes (tuple or list or str, optional): the axes to hide the axis on.
    """
    # A bare string such as 'xy' is expanded into individual axis labels.
    if isinstance(axes, str):
        axes = tuple(axes)

    for label in axes:
        # e.g. plot.xaxis is a list; the first entry is the axis object
        ax = getattr(plot, label + 'axis')[0]
        ax.major_label_text_alpha = 0.0
        ax.major_label_text_font_size = '0pt'
        ax.axis_line_alpha = 0.0
        ax.major_tick_line_alpha = 0.0
        ax.minor_tick_line_alpha = 0.0

    plot.min_border = 0
def make_histogram_source(series):
    """Creates a ColumnDataSource containing the bins of the input series.

    Args:
        series (:py:class:`~pandas.Series`): values to bin into a histogram

    Returns:
        ColumnDataSource: includes bin centers with count of items in the bins
    """
    counts, bins = np.histogram(series, bins=50)
    # Bin centers are the midpoints of consecutive bin edges.  Computed with
    # numpy directly: pd.rolling_mean(bins, 2)[1:] produced the same values
    # but was deprecated in pandas 0.18 and removed in 0.23.
    centers = (bins[:-1] + bins[1:]) / 2.0
    return ColumnDataSource(data={'counts': counts, 'centers': centers})
def make_continuous_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):
    """Makes discrete, then creates representation of the bars to be plotted.

    Args:
        df (DataFrame): contains the data to be converted to a discrete form
        x_field (str): the column in df that maps to the x dim of the plot
        y_field (str, optional): the column in df that maps to the y dim of the plot
        df_orig (DataFrame, optional): original dataframe that the subset ``df`` was
            generated from
        agg (str, optional): the type of aggregation to be used

    Returns:
        ColumnDataSource: aggregated, discrete form of x,y values
    """
    # Generate dataframe required to use the categorical bar source function:
    # ``idx`` holds the integer bin code per row, ``labels`` the interval label
    # for the same 8 bins.
    idx, edges = pd.cut(x=df[x_field], bins=8, retbins=True, labels=False)
    labels, edges = pd.cut(x=df[x_field], bins=8, retbins=True)

    # Bin centers are the midpoints of consecutive bin edges.  Computed with
    # numpy arithmetic: pd.rolling_mean(edges, 2)[1:] produced the same values
    # but was deprecated in pandas 0.18 and removed in 0.23.
    centers = (edges[:-1] + edges[1:]) / 2.0

    # store new value of x as the bin it fell into
    df['centers'] = centers[idx]
    df['labels'] = labels

    # After making it discrete, create the categorical bar source
    return make_categorical_bar_source(df, 'labels', y_field, df_orig, agg)
def make_categorical_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):
    """Creates representation of the bars to be plotted.

    Args:
        df (DataFrame): contains the data to be converted to a discrete form
        x_field (str): the column in df that maps to the x dim of the plot
        y_field (str, optional): the column in df that maps to the y dim of the plot
        df_orig (DataFrame, optional): original dataframe that the subset ``df`` was
            generated from
        agg (str, optional): the type of aggregation to be used

    Returns:
        ColumnDataSource: aggregated, discrete form of x,y values
    """
    # Without an explicit original frame, the subset is treated as the whole
    # population, so 'percent' aggregations are relative to ``df`` itself.
    if df_orig is None:
        df_orig = df

    # handle x-only aggregations separately
    if agg == 'percent' or agg == 'count':

        # percent aggregations are a special case, since pandas doesn't directly support
        if agg == 'percent':

            # percent on discrete col using proportion, on continuous using percent
            if df[y_field].dtype == 'object':
                agg_func = 'count'
            else:
                agg_func = 'sum'

            # denominator: the same aggregate taken over the original data
            total = float(getattr(df_orig[y_field], agg_func)())
            # per-group percentage of the total; total/agg_func are bound as
            # defaults so the lambda does not capture them late
            series = df.groupby(x_field)[y_field].apply(lambda x, total_agg=total, f=agg_func:
                                                        100*(getattr(x, f)()/total_agg))

        elif agg == 'count':
            series = df.groupby(x_field).size()
        else:
            raise ValueError('Unrecognized Aggregation Type for Y of "None"')

        # here we have a series where the values are the aggregation for the index (bars)
        result = pd.DataFrame(data={'labels': series.index, 'heights': series.values})

    # x and y aggregations
    else:
        # Get the y values after grouping by the x values
        group = df.groupby(x_field)[y_field]
        aggregate = getattr(group, agg)
        result = aggregate().reset_index()
        result.rename(columns={x_field: 'labels', y_field: 'heights'}, inplace=True)

    return ColumnDataSource(data=result)
def make_factor_source(series):
    """Generate data source that is based on the unique values in the series.

    Args:
        series (:py:class:`~pandas.Series`): contains categorical-like data

    Returns:
        ColumnDataSource: contains the unique values from the series
    """
    factors = series.unique()
    return ColumnDataSource(data=dict(factors=factors))
def make_bar_plot(datasource, counts_name="counts",
                  centers_name="centers",
                  bar_width=0.7,
                  x_range=None,
                  y_range=None,
                  plot_width=500, plot_height=500,
                  tools="pan,wheel_zoom,box_zoom,save,resize,box_select,reset",
                  title_text_font_size="12pt"):
    """Utility function to set/calculate default parameters of a bar plot.

    Args:
        datasource (ColumnDataSource): represents bars to plot
        counts_name (str): column corresponding to height of the bars
        centers_name (str): column corresponding to the location of the bars
        bar_width (float): the width of the bars in the bar plot
        x_range (list): list of two values, the min and max of the x axis range
        y_range (list, optional): min and max of the y axis range; when not
            provided, defaults to [0, max bar height]
        plot_width (float): width of the plot in pixels
        plot_height (float): height of the plot in pixels
        tools (str): comma separated tool names to add to the plot
        title_text_font_size (str): size of the plot title, e.g., '12pt'

    Returns:
        figure: plot generated from the provided parameters
    """
    # Fix: y_range used to be accepted but silently overwritten; only compute
    # the default when the caller did not supply one.
    if y_range is None:
        top = np.max(datasource.data[counts_name])
        y_range = [0, top]

    # Create the figure container
    plot = figure(
        title="", title_text_font_size=title_text_font_size,
        plot_width=plot_width, plot_height=plot_height,
        x_range=x_range, y_range=y_range, tools=tools)

    # Bars are drawn as rects vertically centered at half their height
    y = [val/2.0 for val in datasource.data[counts_name]]

    # Generate the bars in the figure
    plot.rect(centers_name, y, bar_width, counts_name, source=datasource)

    plot.min_border = 0
    plot.h_symmetry = False
    plot.v_symmetry = False

    # Restrict box selection to the x dimension only
    for tool in plot.select(type=BoxSelectTool):
        tool.dimensions = ['width']

    return plot
def make_histogram(datasource,
                   counts_name="counts",
                   centers_name="centers",
                   x_range=None,
                   bar_width=0.7,
                   plot_width=500,
                   plot_height=500,
                   min_border=40,
                   tools=None,
                   title_text_font_size="12pt"):
    """Utility function to create a histogram figure.

    This is used to create the filter widgets for continuous data in
    CrossFilter.

    Args:
        datasource (ColumnDataSource): represents bars to plot
        counts_name (str): column corresponding to height of the bars
        centers_name (str): column corresponding to the location of the bars
        x_range (list): list of two values, the min and max of the x axis range
        bar_width (float): the width of the bars in the bar plot
        plot_width (float): width of the plot in pixels
        plot_height (float): height of the plot in pixels
        min_border (float): minimum border width of figure in pixels
        tools (str): comma separated tool names to add to the plot
        title_text_font_size (str): size of the plot title, e.g., '12pt'

    Returns:
        figure: histogram plot generated from the provided parameters
    """
    # Pad the x range by one bar width on BOTH sides of the data.  The end of
    # the range previously *subtracted* bar_width, which clipped the
    # right-most bars of the histogram.
    start = np.min(datasource.data[centers_name]) - bar_width
    end = np.max(datasource.data[centers_name]) + bar_width

    plot = make_bar_plot(
        datasource, counts_name=counts_name, centers_name=centers_name,
        x_range=[start, end], plot_width=plot_width, plot_height=plot_height,
        tools=tools, title_text_font_size=title_text_font_size)

    # NOTE(review): ``min_border`` is accepted but never applied here, and
    # make_bar_plot forces plot.min_border = 0 — confirm whether the parameter
    # should be honored before changing the default appearance.
    return plot
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/frame/test_misc_api.py | 7 | 16059 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from copy import deepcopy
import sys
import nose
from distutils.version import LooseVersion
from pandas.compat import range, lrange
from pandas import compat
from numpy.random import randn
import numpy as np
from pandas import DataFrame, Series
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class SharedWithSparse(object):
    """Tests shared between the dense DataFrame and its sparse counterpart.

    Subclasses (mixed in with a TestCase) are expected to provide
    ``self.frame`` — see ``pandas.tests.frame.common.TestData``.
    """

    # allow the test runner to split these tests across processes
    _multiprocess_can_split_ = True

    def test_copy_index_name_checking(self):
        # don't want to be able to modify the index stored elsewhere after
        # making a copy
        for attr in ('index', 'columns'):
            ind = getattr(self.frame, attr)
            ind.name = None
            cp = self.frame.copy()
            getattr(cp, attr).name = 'foo'
            self.assertIsNone(getattr(self.frame, attr).name)

    def test_getitem_pop_assign_name(self):
        # column accessors should propagate the column name to the Series
        s = self.frame['A']
        self.assertEqual(s.name, 'A')

        s = self.frame.pop('A')
        self.assertEqual(s.name, 'A')

        s = self.frame.ix[:, 'B']
        self.assertEqual(s.name, 'B')

        s2 = s.ix[:]
        self.assertEqual(s2.name, 'B')

    def test_get_value(self):
        # get_value must agree with plain column/row indexing for every cell
        for idx in self.frame.index:
            for col in self.frame.columns:
                result = self.frame.get_value(idx, col)
                expected = self.frame[col][idx]
                tm.assert_almost_equal(result, expected)

    def test_join_index(self):
        # left / right
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])

        joined = f.join(f2)
        self.assert_index_equal(f.index, joined.index)
        self.assertEqual(len(joined.columns), 4)

        joined = f.join(f2, how='left')
        self.assert_index_equal(joined.index, f.index)
        self.assertEqual(len(joined.columns), 4)

        joined = f.join(f2, how='right')
        self.assert_index_equal(joined.index, f2.index)
        self.assertEqual(len(joined.columns), 4)

        # inner
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])

        joined = f.join(f2, how='inner')
        self.assert_index_equal(joined.index, f.index.intersection(f2.index))
        self.assertEqual(len(joined.columns), 4)

        # outer
        f = self.frame.reindex(columns=['A', 'B'])[:10]
        f2 = self.frame.reindex(columns=['C', 'D'])

        joined = f.join(f2, how='outer')
        self.assertTrue(tm.equalContents(self.frame.index, joined.index))
        self.assertEqual(len(joined.columns), 4)

        # an unknown join method must raise
        assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')

        # corner case - overlapping columns
        for how in ('outer', 'left', 'inner'):
            with assertRaisesRegexp(ValueError, 'columns overlap but '
                                    'no suffix'):
                self.frame.join(self.frame, how=how)

    def test_join_index_more(self):
        af = self.frame.ix[:, ['A', 'B']]
        bf = self.frame.ix[::2, ['C', 'D']]

        expected = af.copy()
        expected['C'] = self.frame['C'][::2]
        expected['D'] = self.frame['D'][::2]

        result = af.join(bf)
        assert_frame_equal(result, expected)

        result = af.join(bf, how='right')
        assert_frame_equal(result, expected[::2])

        result = bf.join(af, how='right')
        assert_frame_equal(result, expected.ix[:, result.columns])

    def test_join_index_series(self):
        df = self.frame.copy()
        s = df.pop(self.frame.columns[-1])
        joined = df.join(s)

        # TODO should this check_names ?
        assert_frame_equal(joined, self.frame, check_names=False)

        # joining a nameless Series is rejected
        s.name = None
        assertRaisesRegexp(ValueError, 'must have a name', df.join, s)

    def test_join_overlap(self):
        df1 = self.frame.ix[:, ['A', 'B', 'C']]
        df2 = self.frame.ix[:, ['B', 'C', 'D']]

        joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
        df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')
        df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')

        no_overlap = self.frame.ix[:, ['A', 'D']]
        expected = df1_suf.join(df2_suf).join(no_overlap)

        # column order not necessarily sorted
        assert_frame_equal(joined, expected.ix[:, joined.columns])

    def test_add_prefix_suffix(self):
        with_prefix = self.frame.add_prefix('foo#')
        expected = pd.Index(['foo#%s' % c for c in self.frame.columns])
        self.assert_index_equal(with_prefix.columns, expected)

        with_suffix = self.frame.add_suffix('#foo')
        expected = pd.Index(['%s#foo' % c for c in self.frame.columns])
        self.assert_index_equal(with_suffix.columns, expected)
class TestDataFrameMisc(tm.TestCase, SharedWithSparse, TestData):
    """Miscellaneous DataFrame API tests: axes, iteration, conversion,
    transposition, and in-place operation return values.

    Fixtures (``self.frame``, ``self.mixed_frame``, ``self.empty`` ...) come
    from the ``TestData`` mixin.
    """

    klass = DataFrame

    # allow the test runner to split these tests across processes
    _multiprocess_can_split_ = True

    def test_get_axis(self):
        f = self.frame
        # numeric and named axis identifiers resolve to the same axis numbers
        self.assertEqual(f._get_axis_number(0), 0)
        self.assertEqual(f._get_axis_number(1), 1)
        self.assertEqual(f._get_axis_number('index'), 0)
        self.assertEqual(f._get_axis_number('rows'), 0)
        self.assertEqual(f._get_axis_number('columns'), 1)

        self.assertEqual(f._get_axis_name(0), 'index')
        self.assertEqual(f._get_axis_name(1), 'columns')
        self.assertEqual(f._get_axis_name('index'), 'index')
        self.assertEqual(f._get_axis_name('rows'), 'index')
        self.assertEqual(f._get_axis_name('columns'), 'columns')

        self.assertIs(f._get_axis(0), f.index)
        self.assertIs(f._get_axis(1), f.columns)

        # invalid axis identifiers raise ValueError
        assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)
        assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')
        assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)
        assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number,
                           None)

    def test_keys(self):
        getkeys = self.frame.keys
        # keys() returns the columns object itself, not a copy
        self.assertIs(getkeys(), self.frame.columns)

    def test_column_contains_typeerror(self):
        # membership test with an Index object may raise TypeError; it must
        # not crash with anything else
        try:
            self.frame.columns in self.frame
        except TypeError:
            pass

    def test_not_hashable(self):
        df = pd.DataFrame([1])
        self.assertRaises(TypeError, hash, df)
        self.assertRaises(TypeError, hash, self.empty)

    def test_new_empty_index(self):
        # naming one empty frame's index must not leak to another
        df1 = DataFrame(randn(0, 3))
        df2 = DataFrame(randn(0, 3))
        df1.index.name = 'foo'
        self.assertIsNone(df2.index.name)

    def test_array_interface(self):
        with np.errstate(all='ignore'):
            result = np.sqrt(self.frame)
        # ufuncs preserve type and share the original axes
        tm.assertIsInstance(result, type(self.frame))
        self.assertIs(result.index, self.frame.index)
        self.assertIs(result.columns, self.frame.columns)

        assert_frame_equal(result, self.frame.apply(np.sqrt))

    def test_get_agg_axis(self):
        cols = self.frame._get_agg_axis(0)
        self.assertIs(cols, self.frame.columns)

        idx = self.frame._get_agg_axis(1)
        self.assertIs(idx, self.frame.index)

        self.assertRaises(ValueError, self.frame._get_agg_axis, 2)

    def test_nonzero(self):
        self.assertTrue(self.empty.empty)

        self.assertFalse(self.frame.empty)
        self.assertFalse(self.mixed_frame.empty)

        # corner case
        df = DataFrame({'A': [1., 2., 3.],
                        'B': ['a', 'b', 'c']},
                       index=np.arange(3))
        del df['A']
        self.assertFalse(df.empty)

    def test_iteritems(self):
        # iteration must yield Series even with duplicate column labels
        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
        for k, v in compat.iteritems(df):
            self.assertEqual(type(v), Series)

    def test_iter(self):
        self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))

    def test_iterrows(self):
        for i, (k, v) in enumerate(self.frame.iterrows()):
            exp = self.frame.xs(self.frame.index[i])
            assert_series_equal(v, exp)

        for i, (k, v) in enumerate(self.mixed_frame.iterrows()):
            exp = self.mixed_frame.xs(self.mixed_frame.index[i])
            assert_series_equal(v, exp)

    def test_itertuples(self):
        # each tuple is (index, *row values)
        for i, tup in enumerate(self.frame.itertuples()):
            s = Series(tup[1:])
            s.name = tup[0]
            expected = self.frame.ix[i, :].reset_index(drop=True)
            assert_series_equal(s, expected)

        df = DataFrame({'floats': np.random.randn(5),
                        'ints': lrange(5)}, columns=['floats', 'ints'])

        # index=False drops the leading index entry
        for tup in df.itertuples(index=False):
            tm.assertIsInstance(tup[1], np.integer)

        df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
        dfaa = df[['a', 'a']]
        self.assertEqual(list(dfaa.itertuples()), [
            (0, 1, 1), (1, 2, 2), (2, 3, 3)])

        # name=None yields plain tuples
        self.assertEqual(repr(list(df.itertuples(name=None))),
                         '[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')

        tup = next(df.itertuples(name='TestName'))

        # no support for field renaming in Python 2.6, regular tuples are
        # returned
        if sys.version >= LooseVersion('2.7'):
            self.assertEqual(tup._fields, ('Index', 'a', 'b'))
            self.assertEqual((tup.Index, tup.a, tup.b), tup)
            self.assertEqual(type(tup).__name__, 'TestName')

        # keyword column names are positionally renamed to _1, _2, ...
        df.columns = ['def', 'return']
        tup2 = next(df.itertuples(name='TestName'))
        self.assertEqual(tup2, (0, 1, 4))

        if sys.version >= LooseVersion('2.7'):
            self.assertEqual(tup2._fields, ('Index', '_1', '_2'))

        df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))
        # will raise SyntaxError if trying to create namedtuple
        tup3 = next(df3.itertuples())
        self.assertFalse(hasattr(tup3, '_fields'))
        self.assertIsInstance(tup3, tuple)

    def test_len(self):
        self.assertEqual(len(self.frame), len(self.frame.index))

    def test_as_matrix(self):
        frame = self.frame
        mat = frame.as_matrix()

        # every matrix cell matches the corresponding frame cell
        frameCols = frame.columns
        for i, row in enumerate(mat):
            for j, value in enumerate(row):
                col = frameCols[j]
                if np.isnan(value):
                    self.assertTrue(np.isnan(frame[col][i]))
                else:
                    self.assertEqual(value, frame[col][i])

        # mixed type
        mat = self.mixed_frame.as_matrix(['foo', 'A'])
        self.assertEqual(mat[0, 0], 'bar')

        df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})
        mat = df.as_matrix()
        self.assertEqual(mat[0, 0], 1j)

        # single block corner case
        mat = self.frame.as_matrix(['A', 'B'])
        expected = self.frame.reindex(columns=['A', 'B']).values
        assert_almost_equal(mat, expected)

    def test_values(self):
        # .values is a view: writing through it mutates the frame
        self.frame.values[:, 0] = 5.
        self.assertTrue((self.frame.values[:, 0] == 5).all())

    def test_deepcopy(self):
        cp = deepcopy(self.frame)
        series = cp['A']
        series[:] = 10
        # mutating the deep copy must not affect the original
        for idx, value in compat.iteritems(series):
            self.assertNotEqual(self.frame['A'][idx], value)

    # ---------------------------------------------------------------------
    # Transposing

    def test_transpose(self):
        frame = self.frame
        dft = frame.T
        for idx, series in compat.iteritems(dft):
            for col, value in compat.iteritems(series):
                if np.isnan(value):
                    self.assertTrue(np.isnan(frame[col][idx]))
                else:
                    self.assertEqual(value, frame[col][idx])

        # mixed type
        index, data = tm.getMixedTypeDict()
        mixed = DataFrame(data, index=index)

        # transposing mixed dtypes upcasts everything to object
        mixed_T = mixed.T
        for col, s in compat.iteritems(mixed_T):
            self.assertEqual(s.dtype, np.object_)

    def test_transpose_get_view(self):
        # the transpose shares data with the original frame
        dft = self.frame.T
        dft.values[:, 5:10] = 5

        self.assertTrue((self.frame.values[5:10] == 5).all())

    def test_swapaxes(self):
        df = DataFrame(np.random.randn(10, 5))
        assert_frame_equal(df.T, df.swapaxes(0, 1))
        assert_frame_equal(df.T, df.swapaxes(1, 0))
        assert_frame_equal(df, df.swapaxes(0, 0))
        self.assertRaises(ValueError, df.swapaxes, 2, 5)

    def test_axis_aliases(self):
        f = self.frame

        # reg name
        expected = f.sum(axis=0)
        result = f.sum(axis='index')
        assert_series_equal(result, expected)

        expected = f.sum(axis=1)
        result = f.sum(axis='columns')
        assert_series_equal(result, expected)

    def test_more_asMatrix(self):
        values = self.mixed_frame.as_matrix()
        self.assertEqual(values.shape[1], len(self.mixed_frame.columns))

    def test_repr_with_mi_nat(self):
        df = DataFrame({'X': [1, 2]},
                       index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])
        res = repr(df)
        # NOTE(review): the whitespace inside ``exp`` looks collapsed
        # (repr output is normally column-aligned) — verify against the
        # upstream source before relying on this literal.
        exp = ' X\nNaT a 1\n2013-01-01 b 2'
        self.assertEqual(res, exp)

    def test_iterkv_deprecation(self):
        with tm.assert_produces_warning(FutureWarning):
            self.mixed_float.iterkv()

    def test_iterkv_names(self):
        for k, v in compat.iteritems(self.mixed_frame):
            self.assertEqual(v.name, k)

    def test_series_put_names(self):
        series = self.mixed_frame._series
        for k, v in compat.iteritems(series):
            self.assertEqual(v.name, k)

    def test_empty_nonzero(self):
        df = DataFrame([1, 2, 3])
        self.assertFalse(df.empty)
        df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()
        self.assertTrue(df.empty)
        self.assertTrue(df.T.empty)

    def test_inplace_return_self(self):
        # re #1893
        # every inplace=True operation must return None

        data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],
                          'b': [0, 0, 1, 1],
                          'c': [1, 2, 3, 4]})

        def _check_f(base, f):
            # helper: apply f to base and assert it returned None
            result = f(base)
            self.assertTrue(result is None)

        # -----DataFrame-----

        # set_index
        f = lambda x: x.set_index('a', inplace=True)
        _check_f(data.copy(), f)

        # reset_index
        f = lambda x: x.reset_index(inplace=True)
        _check_f(data.set_index('a'), f)

        # drop_duplicates
        f = lambda x: x.drop_duplicates(inplace=True)
        _check_f(data.copy(), f)

        # sort
        f = lambda x: x.sort_values('b', inplace=True)
        _check_f(data.copy(), f)

        # sort_index
        f = lambda x: x.sort_index(inplace=True)
        _check_f(data.copy(), f)

        # sortlevel
        f = lambda x: x.sortlevel(0, inplace=True)
        _check_f(data.set_index(['a', 'b']), f)

        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(data.copy(), f)

        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(data.copy(), f)

        # rename
        f = lambda x: x.rename({1: 'foo'}, inplace=True)
        _check_f(data.copy(), f)

        # -----Series-----
        d = data.copy()['c']

        # reset_index
        f = lambda x: x.reset_index(inplace=True, drop=True)
        _check_f(data.set_index('a')['c'], f)

        # fillna
        f = lambda x: x.fillna(0, inplace=True)
        _check_f(d.copy(), f)

        # replace
        f = lambda x: x.replace(1, 0, inplace=True)
        _check_f(d.copy(), f)

        # rename
        f = lambda x: x.rename({1: 'foo'}, inplace=True)
        _check_f(d.copy(), f)
# Allow running this test module directly: delegate to nose's test runner.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-3.0 |
jenshnielsen/HJCFIT | documentation/source/conf.py | 1 | 10215 | # -*- coding: utf-8 -*-
#
# DCProgs documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 31 17:46:57 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath("@PYINSTALL_DIR@"))

# NOTE(review): the @NAME@ tokens throughout this file appear to be
# placeholders substituted at build time (presumably CMake configure_file) —
# this file is a template and is not importable as-is; confirm the build step.

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode'] + [@SPHINX_EXTENSIONS@]

# Add any paths that contain templates here, relative to this directory.
templates_path = [os.path.join('@SPHINX_SOURCE_DIR@', '_templates')]

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'@PROJECT_NAME@'
copyright = u'2013-2016, University College London'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [os.path.join('@SPHINX_SOURCE_DIR@', '_theme')]
if len("@SPHINX_THEME_DIR@"): html_theme_path.append("@SPHINX_THEME_DIR@")

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [os.path.join('@SPHINX_SOURCE_DIR@', '_static')]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = '@PROJECT_NAME@doc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', '@PROJECT_NAME@.tex', u'@PROJECT_NAME@ Documentation',
     u'Mayeul d\'Avezac', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dcprogs', u'@PROJECT_NAME@ Documentation',
     [u'Mayeul d\'Avezac'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', '@PROJECT_NAME@', u'@PROJECT_NAME@ Documentation',
     u'Mayeul d\'Avezac', '@PROJECT_NAME@', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'@PROJECT_NAME@'
epub_author = u'Mayeul d\'Avezac'
epub_publisher = u'Mayeul d\'Avezac'
epub_copyright = u'2013-2016, University College London'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Breathe bridges doxygen XML output into these Sphinx documents.
breathe_projects = {
    "@PROJECT_NAME@": os.path.join('@PROJECT_BINARY_DIR@',
                                   "documentation", "xml"),
}
breathe_default_project = "@PROJECT_NAME@"

# Common link targets appended to every reST source file.
rst_epilog = """
.. _scipy: http://www.scipy.org/
.. _matplotlib: http://matplotlib.org/
.. _ipython: http://ipython.org/
.. _eigen: http://eigen.tuxfamily.org/index.php?title=Main_Page
"""


def setup(app):
    """Sphinx extension hook: register build-time configuration values."""
    app.add_config_value('python_bindings', "@pythonBindings@", True)
    app.add_config_value('DCPROGS_USE_MPFR',
                         @SPHINX_DCPROGS_USE_MPFR@, 'env')


python_bindings = "@pythonBindings@"
spelling_word_list_filename = os.path.join("@SPHINX_SOURCE_DIR@",
                                           "spelling_wordlist.txt")
| gpl-3.0 |
flightgong/scikit-learn | sklearn/neighbors/base.py | 1 | 23514 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import safe_asarray, atleast2d_or_csr, check_arrays
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
# Metrics accepted by each neighbor-search algorithm, keyed by algorithm name.
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
                     kd_tree=KDTree.valid_metrics,
                     # The following list comes from the
                     # sklearn.metrics.pairwise doc string
                     brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
                            ['braycurtis', 'canberra', 'chebyshev',
                             'correlation', 'cosine', 'dice', 'hamming',
                             'jaccard', 'kulsinski', 'mahalanobis',
                             'matching', 'minkowski', 'rogerstanimoto',
                             'russellrao', 'seuclidean', 'sokalmichener',
                             'sokalsneath', 'sqeuclidean',
                             'yule', 'wminkowski']))

# Metrics usable with sparse input: only the brute-force algorithm supports
# any (the tree structures accept none).
VALID_METRICS_SPARSE = dict(ball_tree=[],
                            kd_tree=[],
                            brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, **kwargs):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_kwds = kwargs
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric in ['wminkowski', 'minkowski']:
self.metric_kwds['p'] = p
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
self.effective_metric_ = self.metric
self.effective_metric_kwds_ = self.metric_kwds
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
self.effective_metric_kwds_ = self.metric_kwds.copy()
p = self.effective_metric_kwds_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_kwds_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = atleast2d_or_csr(X, copy=False)
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_kwds_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_kwds_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_kwds_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = safe_asarray(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_kwds_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = safe_asarray(X)
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
bijanfallah/OI_CCLM | src/RMSE_SPREAD_MAPS.py | 1 | 15003 | # Program to show the maps of RMSE averaged over time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import os
from netCDF4 import Dataset as NetCDFFile
import numpy as np
from CCLM_OUTS import Plot_CCLM
# option == 1 -> shift 4 with default cclm domain and nboundlines = 3
# option == 2 -> shift 4 with smaller cclm domain and nboundlines = 3
# option == 3 -> shift 4 with smaller cclm domain and nboundlines = 6
# option == 4 -> shift 4 with corrected smaller cclm domain and nboundlines = 3
# option == 5 -> shift 4 with corrected smaller cclm domain and nboundlines = 4
# option == 6 -> shift 4 with corrected smaller cclm domain and nboundlines = 6
# option == 7 -> shift 4 with corrected smaller cclm domain and nboundlines = 9
# option == 8 -> shift 4 with corrected bigger cclm domain and nboundlines = 3
from CCLM_OUTS import Plot_CCLM
def read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member/post/',name='member_T_2M_ts_seasmean.nc',var='T_2M'):
# type: (object, object, object) -> object
#a function to read the data from mistral work
"""
:rtype: object
"""
CMD = 'scp $mistral:' + dir + name + ' ./'
os.system(CMD)
nc = NetCDFFile(name)
os.remove(name)
lats = nc.variables['lat'][:]
lons = nc.variables['lon'][:]
t = nc.variables[var][:].squeeze()
rlats = nc.variables['rlat'][:] # extract/copy the data
rlons = nc.variables['rlon'][:]
nc.close()
return(t, lats, lons, rlats, rlons)
def calculate_MAPS_RMSE_of_the_member(member='1', buffer=4, option=0):
# function to cut the area and calculate RMSE
# buffer is the number of grid points to be skipeed
#
pdf = 'RMSE_Patterns_'
if option == 1:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member/post/',
name='member_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member0' + str(member) + '/post/',
name='member0' + str(member) + '_T_2M_ts_monmean_1995.nc',
var='T_2M')
#pdf_name="Figure_" +pdf+ str(member)+ "_"+str(buf)+"_Default.pdf"
pdf_name="RMSE_"+"_Default.pdf"
if option == 2:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member/post/',
name='member_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member0' + str(member) + '/post/',
name='member0' + str(member) + '_T_2M_ts_monmean_1995.nc',
var='T_2M')
pdf_name="Figure_" +pdf+ str(member)+ "_"+str(buf)+"_Small.pdf"
if option == 3:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member/post/',
name='member_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member0' + str(member) + '_relax/post/',
name='member0' + str(member) + '_relax_T_2M_ts_monmean_1995.nc',
var='T_2M')
pdf_name="Figure_" +pdf+ str(member)+ "_"+str(buf)+"_Small_relax6.pdf"
if option == 4:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member_relax_0_small/post/',
name='member_relax_0_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member0' + str(member) + '_relax_0_small/post/',
name='member0' + str(member) + '_relax_0_T_2M_ts_monmean_1995.nc',
var='T_2M')
#pdf_name = "Figure_" + pdf + str(member) + "_" + str(buf) + "relax_0_small.pdf"
pdf_name = "Figure03_RMSE.pdf"
if option == 5:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member_relax_4_small/post/',
name='member_relax_4_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member0' + str(member) + '_relax_4_small/post/',
name='member0' + str(member) + '_relax_4_T_2M_ts_monmean_1995.nc',
var='T_2M')
# pdf_name = "Figure_" + pdf + str(member) + "_" + str(buf) + "relax_9_small.pdf"
pdf_name="Figure04_RMSE.pdf"
if option == 6:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member_relax_6_small/post/',
name='member_relax_6_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member0' + str(member) + '_relax_6_small/post/',
name='member0' + str(member) + '_relax_6_T_2M_ts_monmean_1995.nc',
var='T_2M')
# pdf_name = "Figure_" + pdf + str(member) + "_" + str(buf) + "relax_6_small.pdf"
pdf_name="Figure05_RMSE.pdf"
if option == 7:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member_relax_9_small/post/',
name='member_relax_9_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work2/member0' + str(member) + '_relax_9_small/post/',
name='member0' + str(member) + '_relax_9_T_2M_ts_monmean_1995.nc',
var='T_2M')
# pdf_name = "Figure_" + pdf + str(member) + "_" + str(buf) + "relax_9_small.pdf"
pdf_name="Figure06_RMSE.pdf"
if option == 8:
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work3/member_relax_3_big/post/',
name='member_relax_3_T_2M_ts_monmean_1995.nc', var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work3/member0' + str(member) + '_relax_3_big/post/',
name='member0' + str(member) + '_relax_3_T_2M_ts_monmean_1995.nc',
var='T_2M')
# pdf_name = "Figure_" + pdf + str(member) + "_" + str(buf) + "relax_9_small.pdf"
pdf_name="Figure07_RMSE.pdf"
if option == 9:
SEAS="DJF"
name_2 = 'member_relax_3_T_2M_ts_splitseas_1984_2014_' + SEAS + '.nc'
name_1 = 'member04_relax_3_T_2M_ts_splitseas_1984_2014_' + SEAS + '.nc'
t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb1029/b324045/work4/member_relax_3_big/post/',
name=name_2, var='T_2M')
t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb1029/b324045/work4/member0' + str(member) + '_relax_3_big/post/',
name=name_1, var='T_2M')
# pdf_name = "Figure_" + pdf + str(member) + "_" + str(buf) + "relax_9_small.pdf"
pdf_name="Figure08_RMSE_T_2M.pdf"
#rel='6'
#t_o, lat_o, lon_o, rlat_o, rlon_o = read_data_from_mistral(dir='/work/bb0962/work2/member/post/',name='member_T_2M_ts_monmean_1995.nc',var='T_2M')
#t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb0962/work2/member/post/',name='member_T_2M_ts_monmean_1995.nc',var='T_2M')
#t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb0962/work2/member0'+str(member)+'/post/', name='member0'+str(member)+'_T_2M_ts_monmean_1995.nc',
# var='T_2M')
#t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb0962/work2/member0' + str(member) +'_relax_'+str(rel)+'/post/',
# name='member0' + str(member) +'_relax_'+str(rel)+ '_T_2M_ts_monmean_1995.nc',
# var='T_2M')
# t_f, lat_f, lon_f, rlat_f, rlon_f = read_data_from_mistral(dir='/work/bb0962/work2/member0' + str(member) +'_relax'+'/post/',
# name='member0' + str(member) +'_relax_T_2M_ts_monmean_1995.nc',
# var='T_2M')
#os.system('rm -f *.nc')
row_lat = lat_o[buffer, buffer].squeeze()
row_lon = lon_o[buffer, buffer].squeeze()
#print(row_lat)
#print(row_lon)
#print(lat_o)[0,0]
#print(lat_o)[0,-1]
#print(lon_f)
#start_lon = np.where(lon_f == row_lon)[-1][0]
#start_lat = np.where(lat_f == row_lat)[0][-1]
#start_lon = np.where((lon_f-row_lon)<0.001)[-1][0]
#start_lat = np.where((lat_f-row_lat)<0.001)[0][-1]
start_lon=(buffer+4)
start_lat=(buffer-4)
#print('nowwwwwwwww')
#print(start_lat)
#print(start_lon)
dext_lon = t_o.shape[2] - (2 * buffer)
dext_lat = t_o.shape[1] - (2 * buffer)
#print('thennnnnnnn')
#print(dext_lon)
#print(dext_lat)
month_length=20
forecast = t_f[0:month_length, start_lat:start_lat + dext_lat, start_lon:start_lon + dext_lon]
obs = t_o[0:month_length, buffer:buffer + dext_lat, buffer:buffer + dext_lon]
RMSE=np.zeros((forecast.shape[1],forecast.shape[2]))
RMSE_TIME_SERIES=np.zeros(forecast.shape[0])
lats_f1=lat_f[start_lat:start_lat + dext_lat, start_lon:start_lon + dext_lon]
lons_f1=lon_f[start_lat:start_lat + dext_lat, start_lon:start_lon + dext_lon]
#print(forecast.shape[:])
#print(obs.shape[:])
for i in range(0,forecast.shape[1]):
for j in range(0,forecast.shape[2]):
forecast_resh=np.squeeze(forecast[:,i,j])
obs_resh=np.squeeze(obs[:,i,j])
RMSE[i,j] = mean_squared_error(obs_resh, forecast_resh) ** 0.5
for i in range(0,forecast.shape[0]):
forecast_resh_ts=np.squeeze(forecast[i,:,:])
obs_resh_ts=np.squeeze(obs[i,:,:])
RMSE_TIME_SERIES[i] = mean_squared_error(obs_resh_ts, forecast_resh_ts) ** 0.5
return(RMSE_TIME_SERIES, RMSE, lats_f1, lons_f1, rlat_f, rlon_f, rlat_o, rlon_o, pdf_name)
import cartopy.crs as ccrs
import cartopy.feature
option=9
buf=20
for i in range(4,5):
SEAS="DJF"
nam_ts, nam , lats_f1, lons_f1, rlat_f, rlon_f, rlat_o, rlon_o , pdf_name = calculate_MAPS_RMSE_of_the_member(i, buffer=buf, option=option)
fig = plt.figure('1')
fig.set_size_inches(14, 10)
#Plot_CCLM(bcolor='black', grids='FALSE')
#rp = ccrs.RotatedPole(pole_longitude=-162.0,
# pole_latitude=39.25,
# globe=ccrs.Globe(semimajor_axis=6370000,
# semiminor_axis=6370000))
rp = ccrs.RotatedPole(pole_longitude=-165.0,
pole_latitude=46.0,
globe=ccrs.Globe(semimajor_axis=6370000,
semiminor_axis=6370000))
pc = ccrs.PlateCarree()
ax = plt.axes(projection=rp)
ax.coastlines('50m', linewidth=0.8)
#ax.add_feature(cartopy.feature.LAKES,
# edgecolor='black', facecolor='none',
# linewidth=0.8)
#v = np.linspace(0, 1, 11, endpoint=True)
if SEAS[0] == "D":
v = np.linspace(0, 4, 9, endpoint=True)
else:
v = np.linspace(0, 2, 9, endpoint=True)
cs = plt.contourf(lons_f1,lats_f1,nam, v, transform=ccrs.PlateCarree(), cmap=plt.cm.terrain)
#cs = plt.contourf(lons_f1,lats_f1,nam, transform=ccrs.PlateCarree(), cmap=plt.cm.terrain)
cb = plt.colorbar(cs)
cb.set_label('RMSE [K]', fontsize=20)
cb.ax.tick_params(labelsize=20)
ax.add_feature(cartopy.feature.OCEAN,
edgecolor='black', facecolor='white',
linewidth=0.8)
ax.gridlines()
ax.text(-45.14, 15.24, r'$45\degree N$',
fontsize=15)
ax.text(-45.14, 35.73, r'$60\degree N$',
fontsize=15)
ax.text(-45.14, -3.73, r'$30\degree N$',
fontsize=15)
ax.text(-45.14, -20.73, r'$15\degree N$',
fontsize=15)
ax.text(-19.83, -35.69, r'$0\degree $',
fontsize=15)
ax.text(15.106, -35.69, r'$20\degree E$',
fontsize=15)
#ax.text(26, -29.69, r'$40\degree E$',
# fontsize=15)
plt.hlines(y=min(rlat_f), xmin=min(rlon_f), xmax=max(rlon_f), color='red',linestyles= 'dashed', linewidth=2)
plt.hlines(y=max(rlat_f), xmin=min(rlon_f), xmax=max(rlon_f), color='red',linestyles= 'dashed', linewidth=2)
plt.vlines(x=min(rlon_f), ymin=min(rlat_f), ymax=max(rlat_f), color='red',linestyles= 'dashed', linewidth=2)
plt.vlines(x=max(rlon_f), ymin=min(rlat_f), ymax=max(rlat_f), color='red',linestyles= 'dashed', linewidth=2)
plt.hlines(y=min(rlat_o), xmin=min(rlon_o), xmax=max(rlon_o), color='black',linestyles= 'dashed', linewidth=2)
plt.hlines(y=max(rlat_o), xmin=min(rlon_o), xmax=max(rlon_o), color='black',linestyles= 'dashed', linewidth=2)
plt.vlines(x=min(rlon_o), ymin=min(rlat_o), ymax=max(rlat_o), color='black',linestyles= 'dashed', linewidth=2)
plt.vlines(x=max(rlon_o), ymin=min(rlat_o), ymax=max(rlat_o), color='black',linestyles= 'dashed', linewidth=2)
plt.hlines(y=min(rlat_o[buf:-buf]), xmin=min(rlon_o[buf:-buf]), xmax=max(rlon_o[buf:-buf]), color='black', linewidth=4)
plt.hlines(y=max(rlat_o[buf:-buf]), xmin=min(rlon_o[buf:-buf]), xmax=max(rlon_o[buf:-buf]), color='black', linewidth=4)
plt.vlines(x=min(rlon_o[buf:-buf]), ymin=min(rlat_o[buf:-buf]), ymax=max(rlat_o[buf:-buf]), color='black', linewidth=4)
plt.vlines(x=max(rlon_o[buf:-buf]), ymin=min(rlat_o[buf:-buf]), ymax=max(rlat_o[buf:-buf]), color='black', linewidth=4)
#plt.title("Shift "+ str(i)+pdf_name)
xs, ys, zs = rp.transform_points(pc,
np.array([-17, 105.0]),
np.array([3, 60])).T
# rp = ccrs.RotatedPole(pole_longitude=-162.0,
# pole_latitude=39.25,
# globe=ccrs.Globe(semimajor_axis=6370000,
# semiminor_axis=6370000))
ax.set_xlim(xs)
ax.set_ylim(ys)
plt.savefig(pdf_name)
plt.close()
# RMSE time-series
fig = plt.figure('2')
fig.set_size_inches(14, 10)
plt.plot(nam_ts,'o-', c= 'green')
plt.xlabel('$time$', size=35)
plt.ylabel('$RMSE$', size=35)
plt.ylim([0,.45])
plt.savefig(pdf_name+'_ts.pdf')
plt.close()
import csv
names='/home/fallah/Documents/DATA_ASSIMILATION/Bijan/CODES/CCLM/Python_Codes/historical_runs_yearly/src/TEMP/' + pdf_name+'_Forecast.csv'
with open(names, 'wb') as f:
writer = csv.writer(f)
writer.writerow(nam_ts)
| mit |
pascalgutjahr/Praktikum-1 | Beugung/einzel.py | 1 | 1926 | import matplotlib as mpl
from scipy.optimize import curve_fit
mpl.use('pgf')
import matplotlib.pyplot as plt
plt.rcParams['lines.linewidth'] = 1
import numpy as np
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
d = np.array([16.85,17.35,17.85,18.35,18.85,19.35,19.85,20.35,20.85,21.35,21.85,22.35,22.85,23.35,23.85,
24.35,24.85,25.35,25.85,26.35,26.85,27.35,27.60,27.85,28.10,28.35,28.60,28.85,29.10,29.35,29.85,30.35,30.85,31.35,31.85,
32.35,32.85,33.35,33.85,34.35,34.85,35.35,35.85,36.35,36.85,37.35,37.85,38.35,38.85,39.35,39.85])
I = np.array([3,2,1,2,4,7,8,7,4,2,6,14,24,28,21,8,6,42,120,280,490,
680,780,840,850,860,840,800,740,640,440,240,100,10,5,10,20,42,32,16,8,10,16,22,22,
16,8,4,5,8,12])
I = (I-0.3)*10**(-6)
d = d*1e-3
a = 860e-6
b = 0.15e-3
l = 1
lam = 532e-9
phi = (d - 0.02835) / l
j = a * ((np.sin((np.pi * b * np.sin(phi))/lam)) / ((np.pi * b * np.sin(phi)) / (lam)))**2
def f(d,a,b,l):
return b*b*a*a*((l/(np.pi*a*np.sin((d-0.02835)/1))
)*(l/(np.pi*a*np.sin((d-0.02835)/1))))*((np.sin(
(np.pi*a*np.sin((d-0.02835)/1))/l))*(np.sin((np.pi*a*np.sin((d-0.02385)/1))/l)))
params, covariance= curve_fit(f, d, I)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '±', errors[0])
print('b =', params[1], '±', errors[1])
print('l =', params[2], '±', errors[2])
# plt.plot((d-0.02835),f(d,*params), '-', color = 'blueviolet', label='Ausgleichsgerade', linewidth=1)
plt.plot((d-0.02835), j*10**6, '-', color = "mediumturquoise", label = "Ausgleichskurve", linewidth = 1)
plt.plot((d-0.02835), I*10**6, 'x', color = "forestgreen", label = "Messwerte", linewidth = 1)
plt.xlabel(r'$\zeta - \zeta_0 \,/\,\si{\meter}$')
plt.ylabel(r'$I\,/\,\si{\micro\ampere}$')
plt.grid()
plt.legend()
plt.tight_layout()
plt.savefig("bilder/Einzelspalt.pdf")
| mit |
anne-urai/serialDDM | simulations/DDM_fits.py | 1 | 8631 | #!/usr/bin/env python
# encoding: utf-8
"""
Created by Jan Willem de Gee on 2011-02-16.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import os, sys, pickle, time
import datetime
import collections
import math
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import hddm
import kabuki
from IPython import embed as shell
from joblib import Parallel, delayed
import json
from jw_tools import myfuncs
from jw_tools import ddm_tools
matplotlib.rcParams['pdf.fonttype'] = 42
sns.set(style='ticks', font='Arial', font_scale=1, rc={
'axes.linewidth': 0.25,
'axes.labelsize': 7,
'axes.titlesize': 7,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'legend.fontsize': 6,
'xtick.major.width': 0.25,
'ytick.major.width': 0.25,
'text.color': 'Black',
'axes.labelcolor':'Black',
'xtick.color':'Black',
'ytick.color':'Black',} )
sns.plotting_context()
base_dir = os.path.expanduser('~/Desktop/simulations/')
data_dir = os.path.join(base_dir, 'ddm_fits_data')
model_dir = os.path.join(base_dir, 'ddm_fits_model')
datasets = [
"2018_ou_data_1", # 0
"2018_ou_data_2", # 1
"2018_ou_data_3", # 2
]
def fit_ddm_per_group(data, model, model_dir, model_name, samples=5000, burn=1000, thin=1, n_models=3, n_jobs=12):
res = Parallel(n_jobs=n_jobs)(delayed(fit_ddm_hierarchical)(df, model, model_dir, model_name, samples, burn, thin, model_id) for model_id in range(n_models))
def fit_ddm_hierarchical(data, model, model_dir, model_name, samples=5000, burn=1000, thin=1, model_id=0):
    """Construct and sample a single hierarchical HDDM chain.

    The model object is built by exec'ing the ``model`` string, which is
    presumably a python expression referencing the ``data`` argument
    (e.g. "hddm.HDDM(data, ...)") -- confirm against the json configs.
    The trace database and the fitted model are written to ``model_dir``,
    suffixed with ``model_id`` so parallel chains do not collide.

    :param model_id: index of this chain, used in the output filenames
    :return: the fitted model object
    """
    # exec into module scope so the bound name survives exec's scoping rules
    exec('global m; m = {}'.format(model))
    m.find_starting_values()
    m.sample(samples, burn=burn, thin=thin, dbname=os.path.join(model_dir, '{}_{}.db'.format(model_name, model_id)), db='pickle')
    m.save(os.path.join(model_dir, '{}_{}.hddm'.format(model_name, model_id)))
    return m
def load_ddm_per_group(model_dir, model_name, n_models=3):
    """Load the previously saved hierarchical chains for ``model_name``.

    :param model_dir: directory the chains were saved into
    :param model_name: basename used when saving
    :param n_models: number of chains to load
    :return: list of loaded model objects
    """
    loaded = []
    for chain_id in range(n_models):
        chain_path = os.path.join(model_dir, '{}_{}.hddm'.format(model_name, chain_id))
        loaded.append(kabuki.utils.load(chain_path))
    return loaded
def fit_ddm_per_subject(data, model, model_dir, model_name, n_runs=5, n_jobs=12):
    """Fit the DDM separately (non-hierarchically) to every subject.

    Iterates subjects serially (the parallel variant is kept commented out),
    concatenates the per-subject parameter rows and writes them to
    ``<model_name>_params_flat.csv`` in ``model_dir``.

    :param data: behavioral dataframe with a 'subj_idx' column
    :param model: python expression (string) constructing the hddm model
    :param n_runs: number of optimization restarts per subject
    :param n_jobs: worker count for the (commented-out) parallel path
    :return: dataframe of fitted parameters, one row per subject
    """
    # res = Parallel(n_jobs=n_jobs, backend='loky')(delayed(fit_ddm_subject)(subj_data, subj_idx, model, model_dir, model_name, n_runs) for subj_idx, subj_data in data.groupby('subj_idx'))
    res = []
    # BUGFIX: this used to iterate the module-level ``df`` instead of the
    # ``data`` argument, silently ignoring the caller's dataframe.
    for subj_idx, subj_data in data.groupby('subj_idx'):
        res.append(fit_ddm_subject(subj_data, subj_idx, model, model_dir, model_name, n_runs))
    res = pd.concat(res, axis=0)
    res.to_csv(os.path.join(model_dir, '{}_params_flat.csv'.format(model_name)))
    return res
def fit_ddm_subject(data, subj_idx, model, model_dir, model_name, n_runs=5):
    """Fit the DDM to a single subject via chi-square (G-square) optimization.

    NOTE(review): reads the module-level ``analysis_info`` dict for the
    quantiles -- implicit coupling to the driver loop below; confirm it is
    always populated before this is called.

    :param data: dataframe that is further filtered to ``subj_idx``'s rows
    :param subj_idx: subject identifier, also used as the result row index
    :param model: python expression (string) constructing the hddm model
    :param n_runs: number of optimization restarts
    :return: one-row dataframe with fitted parameter values plus BIC info
    """
    import hddm
    data = data.loc[data["subj_idx"]==subj_idx,:]
    # exec into module scope so the bound name survives exec's scoping rules
    exec('global m; m = {}'.format(model))

    # optimize:
    m.optimize('gsquare', quantiles=analysis_info['quantiles'], n_runs=n_runs)
    res = pd.concat((pd.DataFrame([m.values], index=[subj_idx]), pd.DataFrame([m.bic_info], index=[subj_idx])), axis=1)
    return res
def load_ddm_per_subject(model_dir, model_name):
    """Load the per-subject parameter table written by fit_ddm_per_subject.

    :param model_dir: directory the csv was written into
    :param model_name: basename used when saving
    :return: dataframe of fitted parameters, one row per subject
    """
    path = os.path.join(model_dir, '{}_params_flat.csv'.format(model_name))
    # ``drop(columns=...)`` replaces ``drop('Unnamed: 0', 1)``: passing the
    # axis positionally was deprecated in pandas 1.0 and removed in 2.0.
    return pd.read_csv(path).drop(columns='Unnamed: 0')
# dataset = 4
# version = 0

# Batch driver: for every dataset / model version / split, fit the DDM per
# subject, then regenerate parameter csv's and barplot figures.
run = True  # set False to skip refitting and only regenerate the figures
for ds in [0,1,2,]:
    for version in [0]:

        # load analysis info:
        with open(os.path.join(data_dir, '{}.json'.format(datasets[ds]))) as json_data:
            analysis_info = json.load(json_data)

        # load data (older csv's carry a saved index column that must be dropped):
        try:
            data = pd.read_csv(os.path.join(data_dir, analysis_info['data_file'])).drop('Unnamed: 0', 1)
        except:
            data = pd.read_csv(os.path.join(data_dir, analysis_info['data_file']))

        # stimcoding? (stimulus coding fits on choice; accuracy coding on correctness)
        if analysis_info['stimcoding'] == "True":
            stimcoding = True
            data.rename(columns={'choice_a':'response'}, inplace=True)
        else:
            stimcoding = False
            data.rename(columns={'correct':'response'}, inplace=True)

        # variables:
        subjects = np.unique(data.subj_idx)
        nr_subjects = len(subjects)
        model = analysis_info["model"][version]

        # run:
        for split_by in analysis_info['split_by']:

            # model_name:
            model_name = '{}_{}_{}'.format(analysis_info["model_name"], split_by, version)

            # create figure dir:
            # NOTE(review): this also rebinds `model_dir`, shadowing the
            # module-level ddm_fits_model path, so fits and figures end up in
            # the same directory -- confirm this is intentional.
            fig_dir = model_dir = os.path.join(base_dir, 'ddm_fits_figs', model_name)
            try:
                os.system('mkdir {}'.format(fig_dir))
                os.system('mkdir {}'.format(os.path.join(fig_dir, 'diagnostics')))
            except:
                pass

            # prepare dataframe:
            df = data.copy()

            # fit model:
            if run:
                print("fitting {}".format(model_name))
                n_jobs = 4
                # # hierarchical:
                # results = fit_ddm_per_group(data, model, model_dir, model_name, samples=5000, burn=1000, thin=1, n_models=3, n_jobs=n_jobs)
                # flat:
                results = fit_ddm_per_subject(df, model, model_dir, model_name, n_runs=5, n_jobs=n_jobs)

            # for fit_type in ['flat', 'hierarchical']:
            for fit_type in ['flat']:
                if fit_type == 'flat':
                    results = load_ddm_per_subject(model_dir, model_name)
                else:
                    models = load_ddm_per_group(model_dir, model_name, n_models=3)

                    # gelman rubic (convergence across the independent chains):
                    gr = hddm.analyze.gelman_rubin(models)
                    text_file = open(os.path.join(fig_dir, 'diagnostics', 'gelman_rubic_{}.txt'.format(fit_type)), 'w')
                    for p in gr.items():
                        text_file.write("%s:%s\n" % p)
                    text_file.close()

                    # dic:
                    text_file = open(os.path.join(fig_dir, 'diagnostics', 'DIC_{}.txt'.format(fit_type)), 'w')
                    for i, m in enumerate(models):
                        text_file.write("Model {}: {}\n".format(i, m.dic))
                    text_file.close()

                    # posteriors:
                    m.plot_posteriors(save=True, path=os.path.join(fig_dir, 'diagnostics'), format='pdf')

                    # dataframe: pull per-subject posterior medians ('50q') out
                    # of the stats table and pivot to one row per subject
                    m = models[1]
                    results = m.gen_stats()['50q'].reset_index()
                    results = results.loc[['subj' in c for c in results["index"]],:]
                    a = np.array([results.iloc[i]["index"].split('.')[0] for i in range(results.shape[0])])
                    _, idx = np.unique(a, return_index=True)
                    cols = a[np.sort(idx)]
                    cols = np.array([c.replace('_subj', '') for c in cols])
                    results = pd.DataFrame(np.vstack([np.array(results.loc[np.array([str(subj_idx) == c.split('.')[-1] for c in results["index"]]),:]["50q"]) for subj_idx in subjects]))
                    results.columns = cols

                # drop fit-quality columns if present (flat fits carry them)
                try:
                    params = results.drop(['bic', 'likelihood', 'penalty'], 1)
                except:
                    params = results.copy()
                params = params.loc[:,~np.array(['z_trans' in c for c in params.columns])]
                params.to_csv(os.path.join(fig_dir, 'params_{}.csv'.format(fit_type)))

                # barplot:
                fig = plt.figure(figsize=(6,2))
                ax = fig.add_subplot(111)
                sns.barplot(data=params, ax=ax)
                sns.despine(offset=5, trim=True)
                plt.tight_layout()
                fig.savefig(os.path.join(fig_dir, 'bars_{}.pdf'.format(fit_type)))

                # barplot only z and dc (starting-point bias re-centered on 0):
                params['z'] = params['z'] - 0.5
                fig = plt.figure(figsize=(1.25,1.6))
                ax = fig.add_subplot(111)
                sns.barplot(data=params.loc[:,['z', 'dc']], ax=ax)
                ax.set_ylim(0,0.8)
                sns.despine(offset=5, trim=True)
                plt.tight_layout()
fig.savefig(os.path.join(fig_dir, 'bars2_{}.pdf'.format(fit_type))) | mit |
zangsir/sms-tools | lectures/05-Sinusoidal-model/plots-code/sineModelAnal-bendir.py | 24 | 1245 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
# Sinusoidal-model analysis of bendir.wav: 2001-sample Hamming window,
# FFT size 2048, hop 200, magnitude threshold -80 dB.
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
w = np.hamming(2001)
N = 2048
H = 200
t = -80
minSineDur = .02
maxnSines = 150
freqDevOffset = 10
freqDevSlope = 0.001
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)

# plot the magnitude spectrogram up to maxplotfreq, tracks overlaid
plt.figure(1, figsize=(9.5, 7))
maxplotfreq = 800.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)

# zero-out (then NaN) track points above the plot range so they are not drawn
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (bendir.wav)')

plt.tight_layout()
plt.savefig('sineModelAnal-bendir.png')
plt.show() | agpl-3.0 |
ruleva1983/udacity-selfdrivingcar | term1/P4_advanced_lane_finding/classes.py | 1 | 13102 | import cv2
import numpy as np
import os
from moviepy.editor import VideoFileClip
from IPython.display import display, HTML
from preprocess import Masker, DistRemover, PersTransformer
from utils import superimpose_images
class Line(object):
    """
    A class that holds a single road line. Usually instantiated without
    arguments. If conflicting arguments are passed, default (empty)
    instantiation is imposed.

    :param coeffs: The polynomial coefficients of the line (highest power first)
    :param degree: The degree of the polynomial fitted
    """

    # meters-per-pixel conversion factors in the warped (bird's-eye) image;
    # assumed to match the perspective transform upstream -- TODO confirm.
    yM_PIXEL = 0.042
    xM_PIXEL = 0.0053

    def __init__(self, coeffs=None, degree=None):
        self.coeffs = coeffs
        # BUGFIX: ``polydegree`` used to be assigned only on the *failure*
        # path of the consistency check below, so a Line built with valid
        # explicit coefficients -- or with no arguments at all -- had no
        # ``polydegree`` attribute and ``get_line_points`` raised
        # AttributeError. It is now always defined.
        self.polydegree = degree
        if coeffs is not None and degree != len(coeffs) - 1:
            # conflicting degree/coefficients: fall back to the empty line
            self.polydegree = None
            self.coeffs = None
            print ("Line instantiated with degree and coefficient conflict. Imposing default instantiation!!")
        self.coeffs_meter = None

    def fit(self, x, y, degree=2):
        """
        Fits the line with a polynomial of the given degree, in both pixel and
        real (meter) space. Adapted to the image case: the independent
        variable is y (image rows) and the dependent variable is x.

        :param x: The dependent variable.
        :param y: The independent variable.
        :param degree: The degree of the polynomial.
        """
        assert len(x) == len(y)
        self.polydegree = degree
        self.coeffs = np.polyfit(y, x, degree)
        self.coeffs_meter = np.polyfit(self.yM_PIXEL * y, self.xM_PIXEL * x, degree)

    def exists(self):
        """
        Checks if the line has coefficients (from fit() or the constructor).

        :return: True if coefficients are available, False otherwise.
        """
        return self.coeffs is not None

    def evaluate_curvature(self, y):
        """
        Evaluates and returns the curvature radius of the line in meters at
        pixel position y.

        NOTE(review): requires the meter-space coefficients, which are only
        set by fit(); a Line whose coefficients were assigned manually will
        fail here.

        :param y: The position in pixels.
        :return: The curvature radius in meters.
        """
        assert self.exists()
        yp = y * self.yM_PIXEL
        A, B = self.coeffs_meter[0], self.coeffs_meter[1]
        return ((1 + (2 * A * yp + B) ** 2) ** (1.5)) / np.absolute(2 * A)

    def get_line_points(self, x):
        """
        Evaluates the images of the input array through the line polynomial.
        Asserts if the line has no coefficients.

        :param x: An array of independent variable points.
        :return: A tuple of the input points and their images.
        """
        assert self.exists()
        y = np.zeros_like(x)
        for i in range(self.polydegree + 1):
            y += self.coeffs[i] * np.power(x, self.polydegree - i)
        return x, y

    @staticmethod
    def get_line_pixels(mask, x0, window_size=(100, 100)):
        """
        Detects which pixels in the mask belong to a line, scanning from the
        bottom of the image upwards with a sliding window that is re-centered
        on the mean x of the pixels found in the previous window.

        NOTE(review): no clipping is applied, so a window drifting past the
        left image border indexes with negative x (numpy wrap-around) --
        behavior preserved from the original; confirm masks keep lines away
        from the borders.

        :param mask: A one-channel mask image; non-zero pixels count as "on".
        :param x0: The initial center of the sliding window.
        :param window_size: (horizontal, vertical) window sizes in pixels.
        :return: A tuple containing: an array of x coordinates of the detected
            pixels; an array of y coordinates; the sliding-window corner pairs
            suitable as arguments for cv2.rectangle.
        """
        n_windows = int(mask.shape[0] / window_size[1])
        full_line_pixels_x, full_line_pixels_y = [], []
        y_below = mask.shape[0]
        x_left = x0 - window_size[0] // 2
        windows = []
        win_pixels_x, win_pixels_y = [], []
        for n in range(n_windows):
            if n > 0:
                # re-center on the previous window's hits (if any), move up
                if win_pixels_x:
                    x_left = int(np.mean(win_pixels_x)) - window_size[0] // 2
                y_below -= window_size[1]
            windows.append([(x_left, y_below), (x_left + window_size[0], y_below - window_size[1])])
            win_pixels_x, win_pixels_y = [], []
            for x in range(x_left, x_left + window_size[0]):
                for y in range(y_below - window_size[1], y_below):
                    # BUGFIX/consistency: the first window tested ``== 1``
                    # while later windows tested ``> 0``; use ``> 0``
                    # uniformly so 0/255 masks behave the same everywhere.
                    if mask[y, x] > 0:
                        win_pixels_x.append(x)
                        win_pixels_y.append(y)
            full_line_pixels_x += win_pixels_x
            full_line_pixels_y += win_pixels_y
        return np.array(full_line_pixels_x), np.array(full_line_pixels_y), windows
class RoadLane(object):
    """
    A class holding a Lane of the Road, i.e. a left and a right line
    that form the lane.
    """

    def __init__(self):
        # both boundary lines start unfitted; update() fits them lazily
        self.rightLine = Line()
        self.leftLine = Line()

    def lane_curvature(self, y=720):
        """
        Evaluates the curvature of the lane in meters at pixel position y.

        :param y: pixel vertical position.
        :return: a (left, right) tuple with the curvature of each line.
        """
        return self.leftLine.evaluate_curvature(y), self.rightLine.evaluate_curvature(y)

    def relative_car_position(self, width=1280, y=720.):
        """
        Returns the distance in meters to the center of the line.
        If negative the car stands on the right side of the center.
        Assumes the camera is mounted at the horizontal image center.

        :param width: The number of horizontal pixels of the image
        :param y: The vertical position of the camera in the image (lowest part)
        """
        x_l = self.leftLine.get_line_points(y)[1]
        x_r = self.rightLine.get_line_points(y)[1]
        return ((x_r + x_l) / 2. - width / 2.) * self.rightLine.xM_PIXEL

    def update(self, mask):
        """
        If the lines have not been previously determined, it generates
        them using the full (sliding-window) algorithm. Otherwise it updates
        the already existing lines with the new data image using the lighter
        Tracker search.

        :param mask: The new image mask (one channel, only zeros/ones values)
        """
        if not (self.rightLine.exists() and self.leftLine.exists()):
            self._generateLane(mask)
        else:
            self._updateLane(mask)

    def _generateLane(self, mask, hist_level=None, degree=2):
        """
        Estimate the lines from an input mask image using a sliding
        windows algorithm. First it estimates the initial pixel position
        of the left and right lines using a histogram approach,
        then tracks the lines from the bottom to the top and fits both
        polynomials, updating their coefficients.

        :param mask: The input mask image
        :param hist_level: The upper pixel limit for the histogram search.
        :param degree: The degree of the polynomial to fit the lines
        """
        init_left, init_right, _ = RoadLane.get_initial_points(mask, hist_level)
        pixels_x, pixels_y, _ = Line.get_line_pixels(mask, init_left)
        self.leftLine.fit(pixels_x, pixels_y, degree)
        pixels_x, pixels_y, _ = Line.get_line_pixels(mask, init_right)
        self.rightLine.fit(pixels_x, pixels_y, degree)

    def _updateLane(self, mask):
        # re-fit each line within a band around its previous fit
        left = Tracker(self.leftLine)
        left.update_line(mask)
        right = Tracker(self.rightLine)
        right.update_line(mask)

    def draw_free_space(self, image, color=(0, 255, 0)):
        """
        Draws the free space between the lines on the input image.

        :param image: The input three channel image
        :param color: The color of the free space area in RGB color space.
        :return: The image with the superimposed lane area, the original image
        """
        # sample both polynomials over the full image height (0..720)
        xl, yl = self.leftLine.get_line_points(np.linspace(0, 720, 100))
        xr, yr = self.rightLine.get_line_points(np.linspace(0, 720, 100))
        # build a closed polygon: left edge top-to-bottom + right edge reversed
        pts_left = np.array([np.transpose(np.vstack([yl, xl]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([yr, xr])))])
        pts = np.hstack((pts_left, pts_right))
        drawn_image = np.copy(image)
        cv2.fillPoly(drawn_image, np.int_([pts]), color)
        return drawn_image, image

    def draw_lines(self, image, thick=10, color=(255, 0, 0)):
        """
        Draws the lines on an input image.

        :param image: The input image
        :param thick: The thickness of the line to be drawn
        :param color: The RGB color of the line
        :return: The image with superimposed lines and the original image
        """
        xl, yl = self.leftLine.get_line_points(np.linspace(0, 720, 100))
        xr, yr = self.rightLine.get_line_points(np.linspace(0, 720, 100))
        drawn_image = np.copy(image)
        # draw each polyline as consecutive short segments
        for i in range(len(yl) - 1):
            p1 = (int(yl[i]), int(xl[i]))
            p2 = (int(yl[i + 1]), int(xl[i + 1]))
            cv2.line(drawn_image, p1, p2, color=color, thickness=thick)
        for i in range(len(yr) - 1):
            p1 = (int(yr[i]), int(xr[i]))
            p2 = (int(yr[i + 1]), int(xr[i + 1]))
            cv2.line(drawn_image, p1, p2, color=color, thickness=thick)
        return drawn_image, image

    @staticmethod
    def get_initial_points(mask, hist_level=None):
        """
        Estimates initial search points for the left and right lines by
        locating the column-sum histogram peaks in each image half.

        :param mask: The mask image
        :param hist_level: row above which pixels are ignored; by default the
            lower half of the image is used
        :return: The left and the right initial points, as well as the
            histogram values for visualization
        """
        if hist_level is None:
            histogram = np.sum(mask[mask.shape[0] // 2:, :], axis=0)
        else:
            histogram = np.sum(mask[hist_level:, :], axis=0)
        middle = len(histogram) // 2
        left = np.argmax(histogram[:middle])
        right = middle + np.argmax(histogram[middle:])
        return left, right, histogram
class Tracker(object):
    """
    This object holds an already fitted line, and updates it according to a
    new input mask using a lighter algorithm (band search around the previous
    fit instead of the full sliding-window scan).

    :param line: A line object already fitted
    """

    def __init__(self, line):
        assert line.exists()
        self.line = line

    def update_line(self, mask):
        """
        The main method of the class. It performs the update of the line
        coefficients: restricts the search to a band around the previous fit,
        collects the non-zero mask pixels inside that band and re-fits.

        NOTE(review): if no pixels fall inside the band, cv2.findNonZero
        returns None and the indexing below raises -- confirm upstream
        guarantees at least one hit per frame.

        :param mask: The input mask
        """
        area_mask = self._create_search_area(mask)
        line_mask = cv2.bitwise_and(area_mask, mask)
        points = np.squeeze(cv2.findNonZero(line_mask))
        pixels_x = points[:, 0]
        pixels_y = points[:, 1]
        self.line.fit(pixels_x, pixels_y)

    def _create_search_area(self, mask, width=200):
        """
        Creates a mask of the search band around the previously fitted line.

        Builds a throwaway RoadLane whose left/right lines are copies of the
        previous fit shifted by -width/2 and +width/2 pixels respectively,
        then fills the region between them.

        :param mask: The input mask (used only for its shape)
        :param width: Total band width in pixels
        :return: a mask image with the search band filled in
        """
        lane = RoadLane()
        lane.leftLine.coeffs = np.copy(self.line.coeffs)
        lane.leftLine.polydegree = 2
        lane.leftLine.coeffs[-1] -= width // 2
        lane.rightLine.coeffs = np.copy(self.line.coeffs)
        lane.rightLine.polydegree = 2
        lane.rightLine.coeffs[-1] += width // 2
        area_mask = np.zeros_like(mask)
        area_mask, _= lane.draw_free_space(area_mask, color=(255, 255, 255))
        return area_mask
class Pipeline(object):
    """End-to-end frame processing: undistort, threshold to a binary mask,
    warp to a bird's-eye view, fit/update the lane and draw the result back
    onto the original frame."""

    def __init__(self, remover=None):
        self.lane = RoadLane()
        # We need to pass a remover otherwise it will always calibrate
        if remover is None:
            self.Remover = DistRemover()
        else:
            self.Remover = remover
        self.Transformer = PersTransformer()

    def run(self, img):
        """Process one frame and return it annotated with the detected lane.

        :param img: input camera frame
        :return: frame with lane area, lines and curvature superimposed
        """
        # undistort -> binary mask -> bird's-eye view
        img_undistorted = self.Remover.remove_distorsion(img)
        img_masked, _, _ = Masker.combined_mask(img_undistorted)
        img_warped = self.Transformer.transform(img_masked)
        Minv = self.Transformer.Minv  # inverse perspective map, to project back
        # fit (first frame) or update (subsequent frames) the lane lines
        self.lane.update(img_warped)
        img_out = superimpose_images(img, self.lane, Minv, self.lane.lane_curvature(720))
        return img_out
if __name__ == "__main__":
    import matplotlib.image as mpimg
    import matplotlib.pyplot as plt

    # single-image examples kept commented out for reference:
    #example = 'data/test_images/test5.jpg'
    #img = mpimg.imread(example)
    pipeline = Pipeline()
    #plt.imshow(pipeline.run(img))
    #plt.show()
    #example = 'data/test_images/test6.jpg'
    #img = mpimg.imread(example)
    #pipeline = Pipeline()
    #plt.imshow(pipeline.run(img))
    #plt.show()
    #Remover = DistRemover()

    # process the challenge video frame-by-frame through the pipeline
    video = "data/videos/challenge_video.mp4"
    clip = VideoFileClip(video)
    clip_processed = clip.fl_image(pipeline.run)
    _, video_name = os.path.split(video)
    out_name = os.path.join("data/report_images/", video_name)
clip_processed.write_videofile(out_name, audio=False) | gpl-3.0 |
nickp60/open_utils | grabHits/grabHits.py | 1 | 8667 | #!/usr/bin/env python
"""
version 0.1
Minor version changes:
- pep8 can make output directories directly
Given a nucleotide sequence, get pfam stuff with their rest api, return
USAGE:
$ python snagnblast.py accessions.txt_or_accessions.csv /BLAST/directory/ /output/directory/
"""
print("Warning! This script is depreciated in favor of snagnblast_multi.py")
import os
#import sys
#import re
import datetime
import subprocess
import argparse
from Bio import SeqIO, Entrez
#from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import pandas as pd
#import numpy as np
#from Bio.Alphabet import IUPAC
#from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast.Applications import NcbitblastxCommandline
from Bio.Align.Applications import ClustalwCommandline
DEBUG = True
#%%
#define inputs
if DEBUG:
    # hard-coded developer paths used while debugging
    genelist = os.path.expanduser("~/GitHub/FB/Ecoli_comparative_genomics/data/test_virgenes_bp.csv")
    blastdb = os.path.expanduser("~/BLAST/env_Coli")
    output = os.path.expanduser("~/GitHub/FB/Ecoli_comparative_genomics/results/")
    score_min = 70
    blasttype = "tblastx"
else:
    parser = argparse.ArgumentParser(description="This script takes a list of gene accessions \
from either a text file or a csv, grabs the sequencs from NCBI, and proceeds \
to use either blastn or tblastx to detect the presence of the genes in a custom \
database")
    parser.add_argument("genelist", help="file containing gene accessions. if delimited, use \
the headers in the example file as a template")
    parser.add_argument("blastdb", help="blastdb of interest")
    parser.add_argument("-o", "--output", help="directory in which to place the output files")
    parser.add_argument("-s", "--score_min", help="not currently used; will be used to \
determinine a scoring threshold")
    parser.add_argument("-t", "--blast_type", help="blastn or tblastx")
    args = parser.parse_args()
    genelist = args.genelist
    blastdb = args.blastdb
    blasttype = args.blast_type
    output = args.output
    score_min = args.score_min

# timestamp used to prefix all output files
date = str(datetime.datetime.now().strftime('%Y%m%d'))
if not os.path.isdir(output):
    print("creating %s" % output)
    os.mkdir(output)
#%% open accessions file, determine type, and parse
Entrez.email = "alfredTheDaring@gmail.com"
print("reading in gene list")
genes = open(genelist, "r")
if genes.name.endswith("csv"):
    genelist_type = "delim"
    print("gene list is a comma-deliminated file")
    # expected column layout of the delimited gene list (n itself is unused)
    n = ("accession", "name", "phenotype", "function", "genome", "note", "source")
    genedf = pd.read_csv(genes, sep=",")
    # second column holds the gene names; drop empty (NaN) entries
    genenames = genedf.iloc[0:, 1].tolist()
    genenames = [x for x in genenames if str(x) != 'nan']
    # keep only the (length, name) columns for the fetch loop below
    genesred = genedf.iloc[0:, 1:3]
else:
    print("Reading error; only accepts csv's")
#%% Grab sequences from NCBI, write out resulting fasta file
output_seq_dir = os.path.join(output, str(date+"files_from_grabHits"), "")
os.mkdir(output_seq_dir)

# defaults used to build the Entrez query
gene = "stx1"
db = "nucleotide"
retmax = "100"
field = "[All Fields]"
organism = "Escherichia coli[porgn]"
len_start, len_end = "1", "1000"
use_history = "y" # y or n
#%%
# For each gene row (name + expected length): esearch for E. coli hits of at
# most twice the expected length, efetch the matches as fasta, and queue a
# clustalw alignment command for the written file.
# NOTE(review): the `i < 5` guard limits this to the first five genes --
# presumably a debugging leftover in this deprecated script.
seq_res_list = []
clustalw_comms = []
for i in range(0, len(genesred.index)):
    print(i)
    if i < 5 and genesred.iloc[i, 0] != "nan" and genesred.iloc[i, 1] != "nan":
        with open(os.path.join(output_seq_dir, str(genesred.iloc[i, 1] + "_seqs.fasta")), "w") as outfile:
            acc = genesred.iloc[i, 1]
            print(acc)
            length = genesred.iloc[i, 0]
            print(length)
            query = str(acc + field + " AND " + organism + " AND " + len_start + "[SLEN] : " +
                        str(round(length*2)) + "[SLEN]")
            esearch_handle = Entrez.esearch(db="nucleotide", term=query, usehistory=True, retmax=30)
            result = Entrez.read(esearch_handle)
            webEnv = result['WebEnv']
            print(webEnv)
            queryKey = result["QueryKey"]
            print(queryKey)
            # fetch the matched records through the history server
            efetch_handle = Entrez.efetch(db="nucleotide",retmode="text",rettype="fasta",
                                          webenv=webEnv, query_key=queryKey)
            outfile.write(efetch_handle.read())
            efetch_handle.close()
        seq_res_list.append(os.path.join(output_seq_dir, str(genesred.iloc[i, 1] + "_seqs.fasta")))
        clustalw_comms.append(ClustalwCommandline("clustalw2",
            infile=os.path.join(output_seq_dir, str(genesred.iloc[i, 1] + "_seqs.fasta"))))
        # search_res_list.append(search_handle)
#%%
# NOTE(review): this cell was syntactically broken (missing ':' on the for
# statement, truncated efetch call) and references `search_res_list`, which is
# never defined above (the loop above builds `seq_res_list`). Reconstructed to
# be syntactically valid following the esearch/efetch pattern used earlier;
# the whole cell looks like dead exploratory code in this deprecated script --
# confirm intent before relying on it.
for request in search_res_list:
    result = Entrez.read(request)
    webEnv = result["WebEnv"]
    queryKey = result["QueryKey"]
    handle = Entrez.efetch(db="nucleotide", retmode="xml",
                           webenv=webEnv, query_key=queryKey)
#%%
seqs = SeqIO.parse(sequence_handle, "fasta")
def esearch():
    # NOTE(review): unused stub -- it references names that are undefined at
    # call time (`acc`, `len_range`) and builds a URL string it never returns
    # or requests. Kept verbatim; candidate for deletion.
    esearchCall = str("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=" + db +
                      "&retmax=" + retmax + "&term=" + acc + field + "+" + organism + "+" +
                      len_range + "&usehistory=" + use_history)

def efetch():
    # NOTE(review): unused stub -- `webenv` is undefined and the URL is never
    # returned or requested. Kept verbatim; candidate for deletion.
    efetchCall = str("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.cgi?db=" +
                     db + "&query_key=1&WebEnv=" + webenv + "&rettype=fasta")
# NOTE(review): dead/broken cell in this deprecated script -- `accessions` is
# never defined above, and the loop variables are swapped (enumerate yields
# (index, item), so `acc` actually receives the index). Kept verbatim.
for acc, index in enumerate(accessions):
    print("\n\nFetching %i accessions from NCBI" % len(accessions))
    sequence_handle = Entrez.efetch(db="nucleotide", id=accessions, rettype="fasta")
    seqs = SeqIO.parse(sequence_handle, "fasta")
    with open(str(os.path.join(output, date)+"_sequences.fa"), "w") as fasta_output:
        SeqIO.write(seqs, fasta_output, "fasta")
#%%
# read the fetched sequences back in for inspection
sequences_fasta = open(str(os.path.join(output, date)+"_sequences.fa"), "r")
entrez_results = list(SeqIO.parse(sequences_fasta, "fasta"))
#%%
# NOTE(review): `protein` is assigned the whole list, not the record -- looks
# like an unfinished experiment.
for i, rec in enumerate(entrez_results):
    if i < 5:
        protein = entrez_results
#%%
print("returned %i accessions from ncbi" % len(entrez_results))
if(len(accessions) != len(entrez_results)):
    print("Warning! not all accessions were found!")
sequences_fasta.close()
#%%
def run_blastn():
    """Run a dc-megablast search of the fetched sequences against ``blastdb``.

    Reads the module-level ``fasta_output``, ``output``, ``date`` and
    ``blastdb`` names; writes tabular (outfmt 7) results and returns the
    path to the result file.
    """
    results_path = str(os.path.join(output, date) + "_dcmegablast_results.tab")
    cline = NcbiblastnCommandline(query=fasta_output.name,
                                  db=blastdb, evalue=10,
                                  outfmt=7, out=results_path)
    extra_params = " -num_threads 4 -max_target_seqs 2000 -task dc-megablast"
    print("Running blastn search...")
    # subprocess.Popen(blast_command, stdout=subprocess.PIPE, shell=True).stdout.read()
    subprocess.call(str(cline) + extra_params, shell=True)
    return results_path
def run_tblastx():
    """Run a tblastx search of the fetched sequences against ``blastdb``.

    Reads the module-level ``fasta_output``, ``output``, ``date`` and
    ``blastdb`` names; writes tabular (outfmt 7) results and returns the
    path to the result file.
    """
    results_path = str(os.path.join(output, date) + "_tblastx_results.tab")
    cline = NcbitblastxCommandline(query=fasta_output.name,
                                   db=blastdb, evalue=10,
                                   outfmt=7, out=results_path)
    extra_params = " -num_threads 4 -max_target_seqs 2000 -query_gencode 11 -db_gencode 11"
    print("Running tblastx search...")
    # subprocess.Popen(blast_command, stdout=subprocess.PIPE, shell=True).stdout.read()
    subprocess.call(str(cline) + extra_params, shell=True)
    return results_path
#%% Execute
if blasttype == "blastn":
    output_path_tab = run_blastn()
elif blasttype == "tblastx":
    output_path_tab = run_tblastx()
else:
    print("you need to use either blastn or tblastx, sorry!")
#%% parse output
print("cleaning up the csv output")
# column names for blast's tabular (outfmt 7) output
colnames = ["query_id", "subject_id", "identity_perc", "alignment_length", "mismatches",
            "gap_opens", "q_start", "q_end", "s_start", "s_end", "evalue", "bit_score"]
csv_results = pd.read_csv(open(output_path_tab), comment="#", sep="\t", names=colnames)
#This regex will probably break things rather badly before too long...
# it looks for capital letter and numbers, dot, number, ie SHH11555JJ8.99
csv_results["accession"] = csv_results.query_id.str.extract('(?P<accession>[A-Z _\d]*\.\d*)')
#%% write out results with new headers or with new headers and merged metadat from accessions.tab
output_path_csv = str(os.path.splitext(output_path_tab)[0]+".csv")
if genelist_type == "delim":
    # re-attach the gene metadata from the input csv by accession
    results_annotated = pd.merge(csv_results, genedf, how="left", on="accession")
    results_annotated.to_csv(open(output_path_csv, "w"))
else:
    csv_results.to_csv(open(output_path_csv, "w"))
| mit |
ScreamingUdder/mantid | qt/applications/workbench/workbench/plotting/functions.py | 1 | 7142 | # This file is part of the mantid workbench.
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Defines a collection of functions to support plotting workspaces with
our custom window.
"""
# std imports
import math
# 3rd party imports
from mantid.api import MatrixWorkspace
import matplotlib.pyplot as plt
# local imports
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
PROJECTION = 'mantid'  # matplotlib axes projection registered by mantid's plotting layer
DEFAULT_COLORMAP = 'viridis'
# See https://matplotlib.org/api/_as_gen/matplotlib.figure.SubplotParams.html#matplotlib.figure.SubplotParams
SUBPLOT_WSPACE = 0.5
SUBPLOT_HSPACE = 0.5
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def raise_if_not_sequence(seq, seq_name):
    """Raise ValueError unless *seq* is a list or tuple.

    :param seq: object to check
    :param seq_name: name used in the error message
    :raises ValueError: if *seq* is not a list/tuple (or subclass)
    """
    # isinstance replaces the exact-type membership test so list/tuple
    # subclasses are accepted too.
    if not isinstance(seq, (list, tuple)):
        raise ValueError("{} should be a list or tuple".format(seq_name))
def _validate_plot_inputs(workspaces, spectrum_nums, wksp_indices):
    """Raises a ValueError if any arguments have the incorrect types.

    Exactly one of spectrum_nums/wksp_indices may be supplied; a single
    MatrixWorkspace is accepted in place of a sequence of workspaces.
    """
    if spectrum_nums is not None and wksp_indices is not None:
        raise ValueError("Both spectrum_nums and wksp_indices supplied. "
                         "Please supply only 1.")

    # a lone workspace is fine; anything else must be a list/tuple
    if not isinstance(workspaces, MatrixWorkspace):
        raise_if_not_sequence(workspaces, 'Workspaces')

    if spectrum_nums is not None:
        raise_if_not_sequence(spectrum_nums, 'spectrum_nums')

    if wksp_indices is not None:
        raise_if_not_sequence(wksp_indices, 'wksp_indices')
def _validate_pcolormesh_inputs(workspaces):
    """Raises a ValueError if any arguments have the incorrect types.

    A single MatrixWorkspace is accepted; anything else must be a
    list/tuple of workspaces.
    """
    if isinstance(workspaces, MatrixWorkspace):
        return
    raise_if_not_sequence(workspaces, 'Workspaces')
def plot(workspaces, spectrum_nums=None, wksp_indices=None, errors=False, fmt=None):
    """
    Create a figure with a single subplot and for each workspace/index add a
    line plot to the new axes. show() is called before returning the figure
    instance. A legend is added.

    :param workspaces: A list of workspace handles
    :param spectrum_nums: A list of spectrum number identifiers (general start from 1)
    :param wksp_indices: A list of workspace indexes (starts from 0)
    :param errors: If true then error bars are added for each plot
    :param fmt: Optional matplotlib format string (e.g. 'o', '-') applied to
        every curve. Added because plotSpectrum below already forwards
        ``fmt`` here; previously that call raised TypeError.
    :returns: The figure containing the plots
    """
    # check inputs
    _validate_plot_inputs(workspaces, spectrum_nums, wksp_indices)
    if spectrum_nums is not None:
        kw, nums = 'specNum', spectrum_nums
    else:
        kw, nums = 'wkspIndex', wksp_indices

    # create figure
    fig = plt.figure()
    fig.clf()
    ax = fig.add_subplot(111, projection=PROJECTION)
    plot_fn = ax.errorbar if errors else ax.plot
    for ws in workspaces:
        for num in nums:
            kwargs = {kw: num}
            if fmt is None:
                plot_fn(ws, **kwargs)
            elif errors:
                # errorbar accepts the format string only as a keyword
                plot_fn(ws, fmt=fmt, **kwargs)
            else:
                # plot accepts the format string only positionally
                plot_fn(ws, fmt, **kwargs)
    ax.legend()
    ax.set_title(workspaces[0].name())
    fig.canvas.draw()
    fig.show()
    return fig
def pcolormesh(workspaces):
    """
    Create a figure containing pcolormesh subplots, one per workspace, laid
    out on a near-square grid with a shared colorbar.

    NOTE(review): assumes ``workspaces`` is non-empty -- with an empty list
    ``pcm`` is never bound and the colorbar call raises NameError.

    :param workspaces: A list of workspace handles
    :returns: The figure containing the plots
    """
    # check inputs
    _validate_pcolormesh_inputs(workspaces)

    # create a subplot of the appropriate number of dimensions
    # extend in number of columns if the number of plottables is not a square number
    workspaces_len = len(workspaces)
    square_side_len = int(math.ceil(math.sqrt(workspaces_len)))
    nrows, ncols = square_side_len, square_side_len
    if square_side_len*square_side_len != workspaces_len:
        # not a square number - square_side_len x square_side_len
        # will be large enough but we could end up with an empty
        # row so chop that off
        if workspaces_len <= (nrows-1)*ncols:
            nrows -= 1

    fig, axes = plt.subplots(nrows, ncols, squeeze=False,
                             subplot_kw=dict(projection=PROJECTION))
    row_idx, col_idx = 0, 0
    for subplot_idx in range(nrows*ncols):
        ax = axes[row_idx][col_idx]
        if subplot_idx < workspaces_len:
            ws = workspaces[subplot_idx]
            ax.set_title(ws.name())
            pcm = ax.pcolormesh(ws, cmap=DEFAULT_COLORMAP)
            # BUGFIX: this used map(), which is lazy under Python 3 and was
            # never evaluated, so the tick labels were never rotated.
            for lbl in ax.get_xticklabels():
                lbl.set_rotation(45)
            if col_idx < ncols - 1:
                col_idx += 1
            else:
                row_idx += 1
                col_idx = 0
        else:
            # nothing here
            ax.axis('off')

    # Adjust locations to ensure the plots don't overlap
    fig.subplots_adjust(wspace=SUBPLOT_WSPACE, hspace=SUBPLOT_HSPACE)
    # pcm is the mesh from the last populated axes; all plots share it
    fig.colorbar(pcm, ax=axes.ravel().tolist(), pad=0.06)
    fig.canvas.draw()
    fig.show()
    return fig
# Compatibility function for existing MantidPlot functionality
def plotSpectrum(workspaces, indices, distribution=None, error_bars=False,
                 type=None, window=None, clearWindow=None,
                 waterfall=False):
    """
    Create a figure with a single subplot and for each workspace/index add a
    line plot to the new axes. show() is called before returning the figure instance

    :param workspaces: Workspace/workspaces to plot as a string, workspace handle, list of strings or list of
                       workspaces handles.
    :param indices: A single int or list of ints specifying the workspace indices to plot
    :param distribution: ``None`` (default) asks the workspace. ``False`` means
                         divide by bin width. ``True`` means do not divide by bin width.
                         Applies only when the the workspace is a MatrixWorkspace histogram.
    :param error_bars: If true then error bars will be added for each curve
    :param type: curve style for plot (-1: unspecified; 0: line, default; 1: scatter/dots)
    :param window: Ignored. Here to preserve backwards compatibility
    :param clearWindow: Ignored. Here to preserve backwards compatibility
    :param waterfall: Ignored in this implementation.
    """
    # map MantidPlot's integer curve-style code to a matplotlib format string
    if type == 1:
        fmt = 'o'
    else:
        fmt = '-'

    # NOTE(review): ``fmt`` is forwarded to plot(); plot() must accept a
    # ``fmt`` keyword for this call to succeed -- verify its signature.
    return plot(workspaces, wksp_indices=indices,
                errors=error_bars, fmt=fmt)
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/userdemo/connectionstyle_demo.py | 1 | 3917 | """
====================
Connectionstyle Demo
====================
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.axes_grid import AxesGrid
from matplotlib.offsetbox import AnchoredText
# nodebox section
if __name__ == '__builtin__':
    # were in nodebox: size(), image(), imagesize() and HEIGHT are NodeBox
    # canvas builtins, not defined in this file.
    import os
    import tempfile
    W = 800
    inset = 20
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')

    def tempimage():
        # reserve a temp png path for the rendered figure; caller deletes it
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname

    # running offsets so successive figures stack vertically on the canvas
    imgx = 20
    imgy = 0

    def pltshow(plt, dpi=150):
        # render the matplotlib figure to a temp file, place it on the
        # canvas, then grow the canvas to fit
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        # plain python: just show the figure window
        mplpyplot.show()
# nodebox section end
# Reuse figure number 1 if it exists and start from a clean slate.
fig = plt.figure(1, figsize=(8, 5))
fig.clf()
def add_at(ax, t, loc=2):
    """Anchor text *t* in axes *ax* at location code *loc*; return the artist."""
    props = dict(size=8)
    anchored = AnchoredText(t, loc=loc, prop=props)
    ax.add_artist(anchored)
    return anchored
# 3x5 grid of shared axes: one column per connection-style family below.
grid = AxesGrid(fig, 111, (3, 5), label_mode="1", share_all=True)
grid[0].set_autoscale_on(False)
# Module-level endpoints (demo_con_style defines its own local pair).
x1, y1 = 0.3, 0.3
x2, y2 = 0.7, 0.7
def demo_con_style(ax, connectionstyle, label=None):
    """Draw a two-point arrow on *ax* using *connectionstyle*.

    The connection-style string itself is used as the subplot label
    unless an explicit *label* is given.
    """
    if label is None:
        label = connectionstyle

    start = (0.3, 0.2)
    end = (0.8, 0.6)
    ax.plot([start[0], end[0]], [start[1], end[1]], ".")

    arrow_props = dict(arrowstyle="->",
                       color="0.5",
                       shrinkA=5, shrinkB=5,
                       patchA=None,
                       patchB=None,
                       connectionstyle=connectionstyle)
    ax.annotate("",
                xy=start, xycoords='data',
                xytext=end, textcoords='data',
                arrowprops=arrow_props)

    add_at(ax, label, loc=2)
# One grid column per connection-style family; each call draws the style and
# labels the subplot with the exact parameters used.
column = grid.axes_column[0]
demo_con_style(column[0], "angle3,angleA=90,angleB=0",
               label="angle3,\nangleA=90,\nangleB=0")
demo_con_style(column[1], "angle3,angleA=0,angleB=90",
               label="angle3,\nangleA=0,\nangleB=90")

column = grid.axes_column[1]
demo_con_style(column[0], "arc3,rad=0.")
demo_con_style(column[1], "arc3,rad=0.3")
demo_con_style(column[2], "arc3,rad=-0.3")

column = grid.axes_column[2]
demo_con_style(column[0], "angle,angleA=-90,angleB=180,rad=0",
               label="angle,\nangleA=-90,\nangleB=180,\nrad=0")
demo_con_style(column[1], "angle,angleA=-90,angleB=180,rad=5",
               label="angle,\nangleA=-90,\nangleB=180,\nrad=5")
# BUG FIX: the label previously said "rad=0" although the style uses rad=5.
demo_con_style(column[2], "angle,angleA=-90,angleB=10,rad=5",
               label="angle,\nangleA=-90,\nangleB=10,\nrad=5")

column = grid.axes_column[3]
demo_con_style(column[0], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=0",
               label="arc,\nangleA=-90,\nangleB=0,\narmA=30,\narmB=30,\nrad=0")
demo_con_style(column[1], "arc,angleA=-90,angleB=0,armA=30,armB=30,rad=5",
               label="arc,\nangleA=-90,\nangleB=0,\narmA=30,\narmB=30,\nrad=5")
demo_con_style(column[2], "arc,angleA=-90,angleB=0,armA=0,armB=40,rad=0",
               label="arc,\nangleA=-90,\nangleB=0,\narmA=0,\narmB=40,\nrad=0")

column = grid.axes_column[4]
demo_con_style(column[0], "bar,fraction=0.3",
               label="bar,\nfraction=0.3")
demo_con_style(column[1], "bar,fraction=-0.3",
               label="bar,\nfraction=-0.3")
demo_con_style(column[2], "bar,angle=180,fraction=-0.2",
               label="bar,\nangle=180,\nfraction=-0.2")

# Axes are shared, so setting limits on the first axes applies everywhere;
# hide the lower-left tick labels, tighten margins, and display.
grid[0].set_xlim(0, 1)
grid[0].set_ylim(0, 1)
grid.axes_llc.axis["bottom"].toggle(ticklabels=False)
grid.axes_llc.axis["left"].toggle(ticklabels=False)
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
pltshow(plt)
| mit |
tomsilver/NAB | nab/runner.py | 1 | 9309 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import multiprocessing
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.corpus import Corpus
from nab.detectors.base import detectDataSet
from nab.labeler import CorpusLabel
from nab.optimizer import optimizeThreshold
from nab.scorer import scoreCorpus
from nab.util import updateThresholds
class Runner(object):
    """
    Class to run an endpoint (detect, optimize, or score) on the NAB
    benchmark using the specified set of profiles, thresholds, and/or detectors.
    """

    def __init__(self,
                 dataDir,
                 resultsDir,
                 labelPath,
                 profilesPath,
                 thresholdPath,
                 numCPUs=None):
        """
        @param dataDir        (string)  Directory where all the raw datasets exist.

        @param resultsDir     (string)  Directory where the detector anomaly scores
                                        will be scored.

        @param labelPath      (string)  Path where the labels of the datasets
                                        exist.

        @param profilesPath   (string)  Path to JSON file containing application
                                        profiles and associated cost matrices.

        @param thresholdPath  (string)  Path to thresholds dictionary containing the
                                        best thresholds (and their corresponding
                                        score) for a combination of detector and
                                        user profile.

        @probationaryPercent  (float)   Percent of each dataset which will be
                                        ignored during the scoring process.
                                        (Not a constructor argument; fixed at
                                        0.15 below.)

        @param numCPUs        (int)     Number of CPUs to be used for calls to
                                        multiprocessing.pool.map
        """
        self.dataDir = dataDir
        self.resultsDir = resultsDir
        self.labelPath = labelPath
        self.profilesPath = profilesPath
        self.thresholdPath = thresholdPath
        # None lets multiprocessing pick the machine's CPU count.
        self.pool = multiprocessing.Pool(numCPUs)

        # Fraction of each data file excluded from scoring as a warm-up
        # period, and relative anomaly-window size.
        self.probationaryPercent = 0.15
        self.windowSize = 0.10

        # Populated by initialize().
        self.corpus = None
        self.corpusLabel = None
        self.profiles = None

    def initialize(self):
        """Initialize all the relevant objects for the run."""
        self.corpus = Corpus(self.dataDir)
        self.corpusLabel = CorpusLabel(path=self.labelPath, corpus=self.corpus)

        with open(self.profilesPath) as p:
            self.profiles = json.load(p)

    def detect(self, detectors):
        """Generate results file given a dictionary of detector classes

        Function that takes a set of detectors and a corpus of data and creates a
        set of files storing the alerts and anomaly scores given by the detectors

        @param detectors     (dict)         Dictionary with key value pairs of a
                                            detector name and its corresponding
                                            class constructor.
        """
        print "\nRunning detection step"

        count = 0
        args = []
        # Build one work item per (detector, data file) pair, then fan out
        # across the process pool.
        for detectorName, detectorConstructor in detectors.iteritems():
            for relativePath, dataSet in self.corpus.dataFiles.iteritems():
                # Item layout: (index, detector instance, detector name,
                # ground-truth labels, output dir, data file relative path).
                args.append(
                    (
                        count,
                        detectorConstructor(
                            dataSet=dataSet,
                            probationaryPercent=self.probationaryPercent),
                        detectorName,
                        self.corpusLabel.labels[relativePath]["label"],
                        self.resultsDir,
                        relativePath
                    )
                )
                count += 1

        self.pool.map(detectDataSet, args)

    def optimize(self, detectorNames):
        """Optimize the threshold for each combination of detector and profile.

        @param detectorNames  (list)  List of detector names.

        @return thresholds    (dict)  Dictionary of dictionaries with detector names
                                      then profile names as keys followed by another
                                      dictionary containing the score and the
                                      threshold used to obtained that score.
        """
        print "\nRunning optimize step"

        # scoreFlag False: per-record scores are not written during the
        # optimization sweep.
        scoreFlag = False
        thresholds = {}

        for detectorName in detectorNames:
            resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
            resultsCorpus = Corpus(resultsDetectorDir)

            thresholds[detectorName] = {}

            for profileName, profile in self.profiles.iteritems():
                thresholds[detectorName][profileName] = optimizeThreshold(
                    (self.pool,
                     detectorName,
                     profileName,
                     profile["CostMatrix"],
                     resultsDetectorDir,
                     resultsCorpus,
                     self.corpusLabel,
                     self.probationaryPercent,
                     scoreFlag))

        # Persist the optimized thresholds for later scoring runs.
        updateThresholds(thresholds, self.thresholdPath)

        return thresholds

    def score(self, detectorNames, thresholds):
        """Score the performance of the detectors.

        Function that must be called only after detection result files have been
        generated and thresholds have been optimized. This looks at the result files
        and scores the performance of each detector specified and stores these
        results in a csv file.

        @param detectorNames  (list)    List of detector names.

        @param thresholds     (dict)    Dictionary of dictionaries with detector
                                        names then profile names as keys followed by
                                        another dictionary containing the score and
                                        the threshold used to obtained that score.
        """
        print "\nRunning scoring step"

        scoreFlag = True
        # NOTE(review): this assignment is never used in score();
        # normalize() builds its own baselines dict.
        baselines = {}

        # Remember where each scores CSV lands so normalize() can read them.
        self.resultsFiles = []
        for detectorName in detectorNames:
            resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
            resultsCorpus = Corpus(resultsDetectorDir)

            for profileName, profile in self.profiles.iteritems():
                threshold = thresholds[detectorName][profileName]["threshold"]
                resultsDF = scoreCorpus(threshold,
                                        (self.pool,
                                         detectorName,
                                         profileName,
                                         profile["CostMatrix"],
                                         resultsDetectorDir,
                                         resultsCorpus,
                                         self.corpusLabel,
                                         self.probationaryPercent,
                                         scoreFlag))

                scorePath = os.path.join(resultsDetectorDir, "%s_%s_scores.csv" %
                                         (detectorName, profileName))
                resultsDF.to_csv(scorePath, index=False)
                print "%s detector benchmark scores written to %s" %\
                    (detectorName, scorePath)
                self.resultsFiles.append(scorePath)

    def normalize(self):
        """Normalize the detectors' scores according to the Baseline, and print to
        the console.

        Function can only be called with the scoring step (i.e. runner.score())
        preceding it.

        This reads the total score values from the results CSVs, and
        adds the relevant baseline value. The scores are then normalized by
        multiplying by 100/perfect, where the perfect score is the number of TPs
        possible (i.e. 44.0).

        Note the results CSVs still contain the original scores, not normalized.
        """
        print "\nRunning score normalization step"

        # Get baselines for each application profile.
        baselineDir = os.path.join(self.resultsDir, "baseline")
        if not os.path.isdir(baselineDir):
            raise IOError("No results directory for baseline. You must "
                          "run the baseline detector before normalizing scores.")

        baselines = {}
        for profileName, _ in self.profiles.iteritems():
            fileName = os.path.join(baselineDir,
                                    "baseline_" + profileName + "_scores.csv")
            with open(fileName) as f:
                results = pandas.read_csv(f)
                # The last row of the "Score" column holds the total score.
                baselines[profileName] = results["Score"].iloc[-1]

        # Normalize the score from each results file.
        for resultsFile in self.resultsFiles:
            # Match the profile by substring of the results-file path.
            profileName = [k for k in baselines.keys() if k in resultsFile][0]
            base = baselines[profileName]

            with open(resultsFile) as f:
                results = pandas.read_csv(f)
                perfect = 44.0 - base
                score = (-base + results["Score"].iloc[-1]) * (100/perfect)

                print ("Final score for \'%s\' = %.2f"
                       % (resultsFile.split('/')[-1][:-4], score))
| gpl-3.0 |
anhaidgroup/py_entitymatching | py_entitymatching/feature/addfeatures.py | 1 | 17935 | """
This module contains functions to add a feature to feature table.
"""
import logging
import pandas as pd
import six
from py_entitymatching.utils.validation_helper import validate_object_type
logger = logging.getLogger(__name__)
def get_feature_fn(feature_string, tokenizers, similarity_functions):
    """
    Compile a declarative feature expression into a feature function.

    The expression is compiled in a namespace that bundles the supplied
    tokenizers and similarity functions, producing a self-contained
    ``fn(ltuple, rtuple)`` that evaluates the expression for a tuple pair
    and typically returns a number.

    Args:
        feature_string (string): Feature expression, e.g.
            ``'jaccard(qgm_3(ltuple.name), qgm_3(rtuple.name))'``.
        tokenizers (dictionary): Tokenizer name -> tokenizer function
            (string in, list of tokens out).
        similarity_functions (dictionary): Similarity function name ->
            similarity function (strings or token lists in, number out).

    Returns:
        A Python dictionary suitable for adding to a feature table. It
        contains 'left_attribute', 'right_attribute', 'left_attr_tokenizer',
        'right_attr_tokenizer' and 'simfunction' (each a valid name, or
        'PARSE_EXP' if the expression could not be parsed), plus the
        compiled 'function' and its 'function_source' string.

    Raises:
        AssertionError: If `feature_string` is not a string, or if
            `tokenizers` / `similarity_functions` are not dictionaries.

    See Also:
        :meth:`py_entitymatching.get_sim_funs_for_blocking`,
        :meth:`py_entitymatching.get_tokenizers_for_blocking`,
        :meth:`py_entitymatching.get_sim_funs_for_matching`,
        :meth:`py_entitymatching.get_tokenizers_for_matching`
    """
    # Validate input types.
    validate_object_type(feature_string, six.string_types,
                         error_prefix='Input feature')
    validate_object_type(tokenizers, dict,
                         error_prefix='Input object (tokenizers)')
    validate_object_type(similarity_functions, dict,
                         error_prefix='Input object (similarity_functions)')

    # Namespace the generated function is compiled in; bundling the helpers
    # here makes the returned function self-contained. Similarity functions
    # go in first, so a tokenizer wins on a (pathological) name collision.
    namespace = {}
    if similarity_functions:
        namespace.update(similarity_functions)
    if tokenizers:
        namespace.update(tokenizers)

    # Source of the generated function: a one-line body returning the
    # feature expression.
    source = 'def fn(ltuple, rtuple):\n' + ' ' + 'return ' + feature_string

    # Recover the attributes/tokenizer/sim-function named in the expression.
    parsed = _parse_feat_str(feature_string, tokenizers, similarity_functions)

    # Compile the generated source in the prepared namespace.
    six.exec_(source, namespace)

    parsed['function'] = namespace['fn']
    parsed['function_source'] = source
    return parsed
# parse input feature string
def _parse_feat_str(feature_string, tokenizers, similarity_functions):
    """
    Parse a feature expression and extract its components.

    Recovers the left/right attributes, the tokenizers applied to them, and
    the similarity function used. Any component that cannot be recovered is
    reported as the sentinel string 'PARSE_EXP'.

    Args:
        feature_string (string): Feature expression to parse.
        tokenizers (dictionary): Valid tokenizer names -> functions.
        similarity_functions (dictionary): Valid sim. function names ->
            functions.

    Returns:
        A dictionary with keys 'left_attribute', 'right_attribute',
        'left_attr_tokenizer', 'right_attr_tokenizer', 'simfunction' and
        'is_auto_generated' (always False here).

    Raises:
        AssertionError: If the inputs are not of the expected types.
    """
    # Validate the input parameters.
    validate_object_type(feature_string, six.string_types, error_prefix='Input feature')
    validate_object_type(tokenizers, dict, error_prefix='Input object (tokenizers)')
    validate_object_type(similarity_functions, dict, error_prefix='Input object (similarity_functions)')

    # pyparsing is used for the actual grammar; imported locally to keep the
    # dependency off the module import path.
    from pyparsing import Word, alphanums, ParseException

    # Defaults: report the sentinel for anything we fail to recover.
    left_attribute = 'PARSE_EXP'
    right_attribute = 'PARSE_EXP'
    left_attr_tokenizer = 'PARSE_EXP'
    right_attr_tokenizer = 'PARSE_EXP'
    sim_function = 'PARSE_EXP'
    exception_flag = False

    # Grammar: an attribute reference, a tokenizer call on an attribute,
    # and a sim-function call either with or without tokenizers.
    attr_name = Word(alphanums + "_" + "." + "[" + "]" + '"' + "'")
    tok_fn = Word(alphanums + "_") + "(" + attr_name + ")"
    wo_tok = Word(alphanums + "_") + "(" + attr_name + "," + attr_name + ")"
    wi_tok = Word(alphanums + "_") + "(" + tok_fn + "," + tok_fn + ")"
    feat = wi_tok | wo_tok

    # Try to parse the string; on failure fall through with the sentinels.
    try:
        parsed_string = feat.parseString(feature_string)
    except ParseException as _:
        exception_flag = True

    if not exception_flag:
        # Tokenizers: expect exactly one per side.
        parsed_tokenizers = [value for value in parsed_string
                             if value in tokenizers.keys()]
        # BUG FIX: was "len(parsed_tokenizers) is 2" -- identity comparison
        # on an int, which only works due to CPython's small-int caching.
        if len(parsed_tokenizers) == 2:
            left_attr_tokenizer = parsed_tokenizers[0]
            right_attr_tokenizer = parsed_tokenizers[1]

        # Similarity function: expect exactly one.
        parsed_similarity_function = [value for value in parsed_string
                                      if value in similarity_functions.keys()]
        if len(parsed_similarity_function) == 1:
            sim_function = parsed_similarity_function[0]

        # Left attribute: strip the "ltuple[...]" wrapper and any quotes.
        attribute = [value for value in parsed_string
                     if value.startswith('ltuple[')]
        if len(attribute) == 1:
            attribute = attribute[0]
            left_attribute = attribute[7:len(attribute) - 1].strip('"').strip(
                "'")

        # Right attribute: same treatment for "rtuple[...]".
        attribute = [value for value in parsed_string
                     if value.startswith('rtuple[')]
        if len(attribute) == 1:
            attribute = attribute[0]
            right_attribute = attribute[7:len(attribute) - 1].strip('"').strip(
                "'")

    # Return the parsed information in a dictionary format.
    parsed_dict = {'left_attribute': left_attribute,
                   'right_attribute': right_attribute,
                   'left_attr_tokenizer': left_attr_tokenizer,
                   'right_attr_tokenizer': right_attr_tokenizer,
                   'simfunction': sim_function,
                   'is_auto_generated': False}

    return parsed_dict
def add_feature(feature_table, feature_name, feature_dict):
    """
    Add a feature to the feature table.

    Typically used together with :meth:`~py_entitymatching.get_feature_fn`:
    the user first builds `feature_dict` with that function, then appends it
    here under `feature_name`.

    Args:
        feature_table (DataFrame): A DataFrame containing features.
        feature_name (string): The name that should be given to the feature.
        feature_dict (dictionary): A Python dictionary, typically returned
            by :meth:`~py_entitymatching.get_feature_fn`. Note: it is tagged
            with the feature name in place.

    Returns:
        True if the addition was successful.

    Raises:
        AssertionError: If `feature_table` is not a pandas DataFrame, if
            `feature_name` is not a string, if `feature_dict` is not a
            dictionary, if the table is missing any required column, or if
            `feature_name` is already present in the table.
    """
    # Validate input types.
    validate_object_type(feature_table, pd.DataFrame, 'Input feature table')
    validate_object_type(feature_name, six.string_types, 'Input feature name')
    validate_object_type(feature_dict, dict, 'Input feature dictionary')

    # The table must already carry every required column.
    missing = get_missing_column_values(feature_table.columns)
    if missing:
        raise AssertionError(
            "Feature table does not have all required columns\n "
            "The following columns are missing: {0}".format(", ".join(missing)))

    # Reject duplicate feature names.
    if feature_name in list(feature_table['feature_name']):
        logger.error('Input feature name is already present in feature table')
        raise AssertionError(
            'Input feature name is already present in feature table')

    # Tag the caller's dictionary with the feature name (in place, as the
    # original implementation did), then append it as the last row.
    feature_dict['feature_name'] = feature_name
    if len(feature_table) == 0:
        # An empty table gets the canonical column order re-asserted first.
        feature_table.columns = ['feature_name', 'left_attribute',
                                 'right_attribute', 'left_attr_tokenizer',
                                 'right_attr_tokenizer', 'simfunction',
                                 'function',
                                 'function_source',
                                 'is_auto_generated']
    feature_table.loc[len(feature_table)] = feature_dict

    return True
def get_missing_column_values(column):
    """Return the required feature-table column names absent from *column*,
    preserving the canonical ordering."""
    required = ('feature_name', 'left_attribute', 'right_attribute',
                'left_attr_tokenizer',
                'right_attr_tokenizer', 'simfunction', 'function',
                'function_source', 'is_auto_generated')
    missing = []
    for name in required:
        if name not in column:
            missing.append(name)
    return missing
def create_feature_table():
    """
    Create an empty feature table with the canonical column layout.
    """
    # The fixed column set shared by all feature tables in this module.
    return pd.DataFrame(columns=['feature_name', 'left_attribute',
                                 'right_attribute', 'left_attr_tokenizer',
                                 'right_attr_tokenizer', 'simfunction',
                                 'function', 'function_source',
                                 'is_auto_generated'])
def add_blackbox_feature(feature_table, feature_name, feature_function, **kwargs):
    """
    Add a black box feature to the feature table.

    Args:
        feature_table (DataFrame): The feature table to which the feature
            must be added.
        feature_name (string): The name that should be given to the feature.
        feature_function (Python function): The black box feature function.
        **kwargs: Optional metadata ('left_attribute', 'right_attribute',
            'left_attr_tokenizer', 'right_attr_tokenizer', 'simfunction',
            'function_source'); any omitted entry is stored as None.

    Returns:
        True if the addition was successful.

    Raises:
        AssertionError: If `feature_table` is not a pandas DataFrame, if
            `feature_name` is not a string, if the table's columns do not
            match the canonical set, or if `feature_name` is already
            present in the table.
    """
    # Validate input types.
    validate_object_type(feature_table, pd.DataFrame, 'Input feature table')
    validate_object_type(feature_name, six.string_types, 'Input feature name')

    # The column set must match the canonical layout exactly (order aside).
    reference = create_feature_table()
    if sorted(reference.columns) != sorted(feature_table.columns):
        logger.error('Input feature table does not have the necessary columns')
        raise AssertionError(
            'Input feature table does not have the necessary columns')

    # Reject duplicate feature names.
    if feature_name in list(feature_table['feature_name']):
        logger.error('Input feature name is already present in feature table')
        raise AssertionError(
            'Input feature name is already present in feature table')

    # Assemble the row; metadata fields default to None via kwargs.get.
    entry = {
        'feature_name': feature_name,
        'function': feature_function,
        'left_attribute': kwargs.get('left_attribute'),
        'right_attribute': kwargs.get('right_attribute'),
        'left_attr_tokenizer': kwargs.get('left_attr_tokenizer'),
        'right_attr_tokenizer': kwargs.get('right_attr_tokenizer'),
        'simfunction': kwargs.get('simfunction'),
        'function_source': kwargs.get('function_source'),
        'is_auto_generated': False,
    }

    # Append as the last row; an empty table gets the canonical column
    # order re-asserted first.
    if len(feature_table) == 0:
        feature_table.columns = ['feature_name', 'left_attribute',
                                 'right_attribute', 'left_attr_tokenizer',
                                 'right_attr_tokenizer', 'simfunction',
                                 'function',
                                 'function_source', 'is_auto_generated']
    feature_table.loc[len(feature_table)] = entry

    return True
| bsd-3-clause |
std05048/Thesis | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
# NormalVariable(mean, variance): variance 225 -> sigma 15, matching the
# annotation text below.
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]

# the histogram of the data
# NOTE(review): 'normed' is the old matplotlib keyword (replaced by
# 'density' in newer releases) -- confirm the pinned matplotlib version.
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)

plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
JosmanPS/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500

# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Sweep every (covariance type, component count) pair and keep the model
# with the lowest BIC.
# NOTE(review): np.infty and mixture.GMM are legacy APIs (np.inf and
# GaussianMixture in newer numpy/sklearn) -- confirm pinned versions.
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []

# Plot the BIC scores: one bar group per covariance type, offset so the
# groups sit side by side for each component count.
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
# Star the winning (lowest-BIC) bar.
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Plot the winner: cluster assignments plus one covariance ellipse per
# non-empty component.
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

    # Plot an ellipse to show the Gaussian component; axes come from the
    # eigendecomposition of the covariance.
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)

plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
coreymason/LAHacks2017 | web/linear_regression_engine.py | 1 | 2084 | import warnings
# Suppress the scipy LAPACK gelsd warning emitted by LinearRegression.fit.
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")

import pandas as pd
import numpy as np
import sklearn.linear_model as skl
import matplotlib.pyplot as plt

reg = skl.LinearRegression()

# NOTE(review): DataFrame.as_matrix is a removed pandas API (use .values /
# .to_numpy in newer pandas) -- confirm the pinned pandas version.
data = pd.read_csv('sleep_quality_data.csv', index_col=0)
# First 13 rows are used as the training set -- presumably all but the
# most recent sample; TODO confirm against the CSV contents.
x_train = data.as_matrix(['temperature', 'humidity', 'brightness'])[:13]
y_train = data.as_matrix(['sleep quality'])[:13]

reg.fit(x_train, y_train)

# if there is a higher correlation coefficient
# then you want to maximise that variable, and vice versa
# NOTE(review): the five branches below differ only in the message prefix;
# they could be collapsed into a single lookup, and the "it's fine" branch
# never names the field.
fields = ["Temperature", "Humidity", "Room brightness"]
index = 0
for cof in reg.coef_[0]:
    suggestion = ""
    if cof > 0.5:
        suggestion += "increase " + fields[index] + ", "
        print suggestion
        index += 1
    elif cof > 0:
        suggestion += "slightly increase " + fields[index] + ", "
        print suggestion
        index += 1
    elif cof < -0.5:
        suggestion += "decrease " + fields[index] + ", "
        print suggestion
        index += 1
    elif cof < 0:
        suggestion += "slightly decrease " + fields[index] + ", "
        print suggestion
        index += 1
    else:
        suggestion += "it's fine " + ", "
        print suggestion
        index += 1
#print suggestion

# Predict sleep quality for the most recent sample (last CSV row).
x_test = data.as_matrix(['temperature', 'humidity', 'brightness'])[-1:]
#print x_test
predicted_value = reg.predict(x_test)
print predicted_value

# if predicted_value < 3:
#     for cof in reg.coef_[0]:
#         suggestion = ""
#         if cof > 0.5:
#             suggestion += "increase " + fields[index] + ", "
#             print suggestion
#             index += 1
#         elif cof > 0:
#             suggestion += "slightly increase " + fields[index] + ", "
#             print suggestion
#             index += 1
#         elif cof < -0.5:
#             suggestion += "decrease " + fields[index] + ", "
#             print suggestion
#             index += 1
#         elif cof < 0:
#             suggestion += "slightly decrease " + fields[index] + ", "
#             print suggestion
#             index += 1
#         else:
#             suggestion += "it's fine " + ", "
#             print suggestion
#             index += 1

# plot data
data.plot(kind='scatter', x='temperature', y='sleep quality')
# plot the least squares line
# NOTE(review): x_test has three feature columns but is plotted against a
# single prediction on a 2-D scatter -- this looks questionable; confirm
# intended behavior.
plt.plot(x_test, predicted_value, c='red', linewidth=2)
#plt.show() | mit |
tcm129/trading-with-python | nautilus/nautilus.py | 77 | 5403 | '''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
    """DataFrame-backed Qt model holding one row per subscribed symbol.

    Columns are position/bid/ask/last; rows are added lazily the first
    time a price message for an unknown symbol arrives.
    """
    def __init__(self):
        super(PriceListener,self).__init__()
        self._header = ['position','bid','ask','last']
    def addSymbol(self,symbol):
        """Append an empty row (position 0, NaN prices) for *symbol*."""
        data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
        row = pandas.DataFrame(data, index = pandas.Index([symbol]))
        self.df = self.df.append(row[self._header]) # append data and set correct column order
    def priceHandler(self,sender,event,msg=None):
        """Event-system callback: update the cell for msg['symbol']/msg['type'].

        Only tick types that match a column name ('bid'/'ask'/'last') are
        stored; other tick types (high, low, close, open) are ignored.
        """
        if msg['symbol'] not in self.df.index:
            self.addSymbol(msg['symbol'])
        if msg['type'] in self._header:
            self.df.ix[msg['symbol'],msg['type']] = msg['price']
            self.signalUpdate()
        #print self.df
class Broker(Sender):
    """Thin wrapper around an Interactive Brokers TWS connection.

    Subscribes to market data for stock symbols and re-dispatches incoming
    tick-price messages as 'price' events through the Sender event system.
    """
    def __init__(self, name = "broker"):
        super(Broker,self).__init__()
        self.name = name
        self.log = logger.getLogger(self.name)
        self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
        self.contracts = {} # a dict to keep track of subscribed contracts
        self._id2symbol = {} # id-> symbol dict
        self.tws = None
        self._nextId = 1 # tws subscription id
        self.nextValidOrderId = None
    def connect(self):
        """ connect to tws """
        self.tws = ibConnection() # tws interface
        self.tws.registerAll(self._defaultHandler)
        self.tws.register(self._nextValidIdHandler,'NextValidId')
        self.log.debug('Connecting to tws')
        self.tws.connect()
        self.tws.reqAccountUpdates(True,'')
        self.tws.register(self._priceHandler,'TickPrice')
    def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
        ''' subscribe to stock data '''
        self.log.debug('Subscribing to '+symbol)
        c = Contract()
        c.m_symbol = symbol
        c.m_secType = secType
        c.m_exchange = exchange
        c.m_currency = currency
        # Each subscription gets a fresh ticker id so replies can be mapped
        # back to the symbol in _priceHandler.
        subId = self._nextId
        self._nextId += 1
        self.tws.reqMktData(subId,c,'',False)
        self._id2symbol[subId] = c.m_symbol
        self.contracts[symbol]=c
    def disconnect(self):
        """Drop the TWS connection."""
        self.tws.disconnect()
    #------event handlers--------------------
    def _defaultHandler(self,msg):
        ''' default message handler: log TWS error messages, ignore the rest '''
        #print msg.typeName
        if msg.typeName == 'Error':
            self.log.error(msg)
    def _nextValidIdHandler(self,msg):
        # TWS sends the next usable order id right after connecting.
        self.nextValidOrderId = msg.orderId
        self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
    def _priceHandler(self,msg):
        #translate to meaningful messages
        # priceTicks maps the numeric tick field to 'bid'/'ask'/'last'/... ;
        # listeners receive {'symbol', 'price', 'type'} dicts.
        message = {'symbol':self._id2symbol[msg.tickerId],
                   'price':msg.price,
                   'type':priceTicks[msg.field]}
        self.dispatch('price',message)
#-----------------GUI elements-------------------------
class TableView(QTableView):
    """ extended table view: row selection plus a debug context menu """
    def __init__(self,name='TableView1', parent=None):
        super(TableView,self).__init__(parent)
        self.name = name
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
    def contextMenuEvent(self, event):
        """Show a right-click menu with a single 'print selected rows' action."""
        menu = QMenu(self)
        Action = menu.addAction("print selected rows")
        Action.triggered.connect(self.printName)
        menu.exec_(event.globalPos())
    def printName(self):
        """Dump the currently selected DataFrame rows to stdout (debug aid)."""
        print "Action triggered from " + self.name
        print 'Selected :'
        for idx in self.selectionModel().selectedRows():
            print self.model().df.ix[idx.row(),:]
class Form(QDialog):
    """Main window: connects the broker, subscribes a fixed symbol list and
    shows live prices in a table bound to the PriceListener model."""
    def __init__(self,parent=None):
        super(Form,self).__init__(parent)
        self.broker = Broker()
        self.price = PriceListener()
        self.broker.connect()
        symbols = ['SPY','XLE','QQQ','VXX','XIV']
        for symbol in symbols:
            self.broker.subscribeStk(symbol)
        # Route broker 'price' events into the table model.
        self.broker.register(self.price.priceHandler, 'price')
        widget = TableView(parent=self)
        widget.setModel(self.price)
        widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
        layout = QVBoxLayout()
        layout.addWidget(widget)
        self.setLayout(layout)
    def __del__(self):
        # Best-effort cleanup when the dialog is garbage-collected.
        print 'Disconnecting.'
        self.broker.disconnect()
# Script entry point: start the Qt event loop with the live-price window.
if __name__=="__main__":
    print "Running nautilus"
    import sys
    app = QApplication(sys.argv)
    form = Form()
    form.show()
    app.exec_()
print "All done." | bsd-3-clause |
rdempsey/python-for-sharing | practical-predictive-modeling-in-python/scoring code/bin/tlo_verification_and_matching.py | 1 | 3347 | #!/usr/bin/env python
# encoding: utf-8
"""
tlo_verification_and_matching.py
Created by Robert Dempsey on 05/18/2015
Updated by Robert Dempsey on 07/29/2015
"""
import pickle
def verify_record(record_scores):
    """
    Given a pandas dataframe with the scores for a record, a record is either verified (1) or non-verified (0)
    :param record_scores: the set of scores for a record
    :return: verification: 0 = non-verified, 1 = verified
    """
    # Load the trained model once and cache it on the function object, so
    # repeated calls do not re-open and re-unpickle the model from disk.
    # The original also leaked the file handle (open() without close());
    # the with-block closes it deterministically.
    if not hasattr(verify_record, "_classifier"):
        tlo_classifier_file = "models/tlo_lr_classifier_07.28.15.dat"
        with open(tlo_classifier_file, "rb") as model_file:
            verify_record._classifier = pickle.load(model_file)

    # Return the prediction for this record
    return verify_record._classifier.predict(record_scores)[0]
def ssn_match(ssn_score):
    """
    Given an SSN score a record is a match (1) or not a match (0)
    :param ssn_score: the ssn score to test
    :return: match: 0 = not-match, 1 = match
    """
    # An exact score of 300 is the only value counted as an SSN match.
    return 1 if ssn_score == 300 else 0
def dob_match(dob_score):
    """
    Given an DOB score a record is either a match (1) or not a match (0)
    :param dob_score: the dob score to test
    :return: match: 0 = not-match, 1 = match
    """
    # Only a perfect score of 300 counts as a date-of-birth match.
    return int(dob_score == 300)
def name_match(full_name_check_value, last_name_check_value, name_scores):
    """
    Given all of the name scores, if any of them = 300 then we have a match
    :return: match: 0 = not-match; 1 = match
    """
    # Either explicit check flag set to 1 is an immediate match.
    if 1 in (full_name_check_value, last_name_check_value):
        return 1
    # Otherwise any individual name score of 280 or above counts as a match.
    return 1 if any(score >= 280 for score in name_scores) else 0
def determine_review_type(full_name_check_value, verified, name_scores):
    """
    Determines the type of review needed for a record given its name scores.

    Rule 1: If the full name check passed or the record is verified, no review
    Rule 2: If any name score is 280 or above, no review because it's verified
    Rule 3: If 280 > name_score >= 260 => flag for visual review
    Rule 4: Everything else needs no review

    :param full_name_check_value: 1 if the full name check passed, else 0
    :param verified: 1 if the record was verified, else 0
    :param name_scores: iterable of name match scores
    :return: review_type: "VISUAL" or "" (no review needed)
    """
    if full_name_check_value == 1 or verified == 1:
        return ""
    # A single strong match (>= 280) trumps any borderline scores.
    if any(score >= 280 for score in name_scores):
        return ""
    if any(279 >= score >= 260 for score in name_scores):
        return "VISUAL"
    return ""
def explain_failure(ssn_match_score, dob_match_score, name_match_score):
    """
    Explains the reason for a record failing one or more checks
    """
    # Collect the name of every failed check (score 0) and join them with
    # single spaces, e.g. "SSN DOB NAME".
    failed_checks = []
    if ssn_match_score == 0:
        failed_checks.append("SSN")
    if dob_match_score == 0:
        failed_checks.append("DOB")
    if name_match_score == 0:
        failed_checks.append("NAME")
    return " ".join(failed_checks)
def convert_failure_explanation_to_number(failure_explanation):
    """
    Converts the provided failure explanation to a number
    """
    # Table-driven replacement for the if/elif chain. Unknown explanations
    # fall through to 0, which matches the original chain's final else
    # (and makes 'dob' and "unknown" intentionally share code 0).
    codes = {
        'dob': 0,
        'name': 1,
        'ssn dob name': 2,
        'ssn': 3,
        'ssn name': 4,
        'ssn dob': 5,
        'dob name': 6,
    }
    return codes.get(failure_explanation.lower(), 0)
sql-machine-learning/sqlflow | python/runtime/optimize/model_generation_test.py | 1 | 14369 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import numpy as np
import pandas as pd
import pyomo.environ as pyomo_env
from runtime.optimize.local import generate_model_with_data_frame, solve_model
from runtime.optimize.model_generation import (
IDENTIFIER_REGEX, assert_are_valid_tokens,
generate_objective_and_constraint_expr)
class TestAssertValidTokens(unittest.TestCase):
    """Unit tests for IDENTIFIER_REGEX and assert_are_valid_tokens."""
    def is_identifier(self, token):
        """Return True when *token* fully matches the identifier regex."""
        return IDENTIFIER_REGEX.fullmatch(token) is not None
    def test_is_identifier(self):
        """Identifiers may not start with a digit; underscores are allowed."""
        tokens = ['a', '_', 'a123', '__', '_123']
        for t in tokens:
            self.assertTrue(self.is_identifier(t))
        tokens = ['1', '123_', '3def']
        for t in tokens:
            self.assertFalse(self.is_identifier(t))
    def test_assert_valid_tokens(self):
        """Token validation: unknown names, bad group_by and empty token
        lists must raise; result_value_name matches case-insensitively."""
        tokens = ['SUM', '(', 'finishing', '*', 'product', ')', '<=', '100']
        # valid expression
        assert_are_valid_tokens(columns=['finishing', 'product'],
                                tokens=tokens,
                                result_value_name='product')
        # invalid group_by
        with self.assertRaises(AssertionError):
            assert_are_valid_tokens(columns=['finishing', 'product'],
                                    tokens=tokens,
                                    result_value_name='product',
                                    group_by='invalid_group_by')
        # tokens = None
        with self.assertRaises(AssertionError):
            assert_are_valid_tokens(columns=['finishing', 'product'],
                                    tokens=None,
                                    result_value_name='product')
        # tokens = []
        with self.assertRaises(AssertionError):
            assert_are_valid_tokens(columns=['finishing', 'product'],
                                    tokens=[],
                                    result_value_name='product')
        # tokens not inside columns
        tokens = [
            'SUM', '(', 'finishing', '*', 'invalid_token', ')', '<=', '100'
        ]
        with self.assertRaises(AssertionError):
            assert_are_valid_tokens(columns=['finishing', 'product'],
                                    tokens=tokens,
                                    result_value_name='product')
        # tokens not inside columns but equal to result_value_name
        # ignore cases
        tokens = [
            'SUM', '(', 'FinisHing', '*', 'pRoducT_VaLue', ')', '<=', '100'
        ]
        assert_are_valid_tokens(columns=['finishing', 'product'],
                                tokens=tokens,
                                result_value_name='product_value')
class TestModelGenerationBase(unittest.TestCase):
    """Shared helpers for the model-generation tests.

    Subclasses must set self.data_frame and self.variables in setUp().
    """
    def generate_objective(self, tokens, result_value_name):
        """Generate only the objective expression string for *tokens*."""
        obj_expr, _ = generate_objective_and_constraint_expr(
            columns=self.data_frame.columns,
            objective=tokens,
            constraints=None,
            variables=self.variables,
            result_value_name=result_value_name,
            variable_str="model.x",
            data_str="DATA_FRAME")
        return obj_expr
    def generate_constraints(self, constraint, result_value_name):
        """Generate the (expr, range, vars) triple for a single constraint."""
        _, c_expr = generate_objective_and_constraint_expr(
            columns=self.data_frame.columns,
            objective=None,
            constraints=[constraint],
            variables=self.variables,
            result_value_name=result_value_name,
            variable_str="model.x",
            data_str="DATA_FRAME")
        assert len(c_expr) == 1, "invalid constraint expression"
        return c_expr[0]
class TestModelGenerationWithoutGroupBy(TestModelGenerationBase):
    """Tests for constraints without GROUP BY, using a toy product-mix LP.

    Each check is run twice: once with the variable column name ('product')
    and once with a separate result_value_name ('product_value'); both must
    generate identical expressions.
    """
    def setUp(self):
        self.data_frame = pd.DataFrame(
            data={
                'product': ['soldier', 'train'],
                'price': [27, 21],
                'materials_cost': [10, 9],
                'other_cost': [14, 10],
                'finishing': [2, 1],
                'carpentry': [1, 1],
                'max_num': [40, 10000],
            })
        self.variables = ["product"]
    def replace_objective_token(self, objective, old, new):
        """Return a copy of *objective* with every *old* token replaced."""
        o = copy.copy(objective)
        for i, token in enumerate(o):
            if token == old:
                o[i] = new
        return o
    def replace_constraint_token(self, constraint, old, new):
        """Return a copy of one constraint (or a list of them) with every
        *old* token in its "tokens" list replaced by *new*."""
        def replace_one_constraint(c):
            c = copy.deepcopy(c)
            for i, token in enumerate(c["tokens"]):
                if token == old:
                    c["tokens"][i] = new
            return c
        if isinstance(constraint, (list, tuple)):
            return [replace_one_constraint(c) for c in constraint]
        else:
            return replace_one_constraint(constraint)
    def test_multiple_brackets(self):
        """Nested SUM(...) calls must produce nested generated sums with
        distinct loop indices (i_0, i_1)."""
        constraint = {
            "tokens": [
                'SUM', '(', 'finishing', '*', 'product', '+', 'SUM', '(',
                'product', ')', ')', '<=', '100'
            ]
        }
        c0, range0, vars0 = self.generate_constraints(
            constraint, result_value_name='product')
        result_value_name = "product_value"
        c1, range1, vars1 = self.generate_constraints(
            self.replace_constraint_token(constraint, "product",
                                          result_value_name),
            result_value_name)
        self.assertEqual(c0, c1)
        self.assertEqual(range0, range1)
        self.assertEqual(vars0, vars1)
        self.assertTrue(vars0 is None)
        self.assertTrue(range0 is None)
        self.assertEqual(
            c0,
            'sum([DATA_FRAME["finishing"][i_0]*model.x[i_0]+sum([model.x[i_1] '
            'for i_1 in model.x]) for i_0 in model.x])<=100')
    def test_model_generation(self):
        """End-to-end: expression generation plus solving the LP with glpk.

        maximize SUM((price - materials_cost - other_cost) * product)
        subject to finishing/carpentry capacity and per-product maxima.
        """
        objective = [
            'SUM', '(', '(', 'price', '-', 'materials_cost', '-', 'other_cost',
            ')', '*', 'product', ')'
        ]
        constraints = [
            {
                "tokens":
                ['SUM', '(', 'finishing', '*', 'product', ')', '<=', '100'],
            },
            {
                "tokens":
                ['SUM', '(', 'carpentry', '*', 'product', ')', '<=', '80'],
            },
            {
                "tokens": ['product', '<=', 'max_num']
            },
        ]
        result_value_name = "product_value"
        obj_str1 = self.generate_objective(
            self.replace_objective_token(objective, "product",
                                         result_value_name), result_value_name)
        obj_str2 = self.generate_objective(objective, "product")
        self.assertEqual(obj_str1, obj_str2)
        self.assertEqual(
            obj_str1,
            'sum([(DATA_FRAME["price"][i_0]-DATA_FRAME["materials_cost"][i_0]-'
            'DATA_FRAME["other_cost"][i_0])*model.x[i_0] for i_0 in model.x])')
        const_01, range_01, vars_01 = self.generate_constraints(
            self.replace_constraint_token(constraints[0], "product",
                                          result_value_name),
            result_value_name)
        const_02, range_02, vars_02 = self.generate_constraints(
            constraints[0], "product")
        self.assertEqual(const_01, const_02)
        self.assertEqual(range_01, range_02)
        self.assertEqual(vars_01, vars_02)
        self.assertTrue(range_01 is None)
        self.assertTrue(vars_01 is None)
        self.assertEqual(
            const_01, 'sum([DATA_FRAME["finishing"][i_0]*model.x[i_0] '
            'for i_0 in model.x])<=100')
        const_11, range_11, vars_11 = self.generate_constraints(
            self.replace_constraint_token(constraints[1], "product",
                                          result_value_name),
            result_value_name)
        const_12, range_12, vars_12 = self.generate_constraints(
            constraints[1], "product")
        self.assertEqual(const_11, const_12)
        self.assertEqual(range_11, range_12)
        self.assertEqual(vars_11, vars_12)
        self.assertTrue(range_11 is None)
        self.assertTrue(vars_11 is None)
        self.assertEqual(
            const_11, 'sum([DATA_FRAME["carpentry"][i_0]*model.x[i_0] '
            'for i_0 in model.x])<=80')
        const_21, range_21, vars_21 = self.generate_constraints(
            self.replace_constraint_token(constraints[2], "product",
                                          result_value_name),
            result_value_name)
        const_22, range_22, vars_22 = self.generate_constraints(
            constraints[2], "product")
        self.assertEqual(const_21, const_22)
        self.assertEqual(range_21, range_22)
        self.assertEqual(vars_21, vars_22)
        self.assertEqual(range_21, "model.x")
        self.assertEqual(vars_21, ["__index"])
        self.assertEqual(const_21,
                         'model.x[__index]<=DATA_FRAME["max_num"][__index]')
        # TODO(sneaxiy): need to add more tests to generated models
        model1 = generate_model_with_data_frame(data_frame=self.data_frame,
                                                variables=self.variables,
                                                variable_type="Integers",
                                                result_value_name="product",
                                                objective=objective,
                                                direction="maximize",
                                                constraints=constraints)
        self.assertTrue(isinstance(model1, pyomo_env.ConcreteModel))
        result_x, result_y = solve_model(model1, 'glpk')
        self.assertTrue(
            np.array_equal(result_x, np.array([20, 60], dtype='int64')))
        self.assertEqual(result_y, 180)
        # The same problem minimized over the reals is unbounded, so the
        # solver is expected to fail with a ValueError.
        model2 = generate_model_with_data_frame(
            data_frame=self.data_frame,
            variables=self.variables,
            variable_type="Reals",
            result_value_name=result_value_name,
            objective=self.replace_objective_token(objective, "product",
                                                   result_value_name),
            direction="minimize",
            constraints=self.replace_constraint_token(constraints, "product",
                                                      result_value_name))
        self.assertTrue(isinstance(model2, pyomo_env.ConcreteModel))
        with self.assertRaises(ValueError):
            solve_model(model2, 'glpk')
class TestModelGenerationWithGroupBy(TestModelGenerationBase):
    """Tests for GROUP BY constraints, using a plant-to-market shipment
    (transportation) problem solved with the baron solver."""
    def setUp(self):
        self.data_frame = pd.DataFrame(
            data={
                'plants': ["plantA", "plantA", "plantB", "plantB"],
                'markets': ["marketA", "marketB", "marketA", "marketB"],
                'distance': [140, 210, 300, 90],
                'capacity': [100, 100, 90, 90],
                'demand': [130, 60, 130, 60]
            })
        self.variables = ["plants", "markets"]
        self.result_value_name = "shipment"
    def test_main(self):
        """GROUP BY constraints must generate per-group sums (numpy.where
        over the grouped column) and per-group iteration ranges."""
        objective = [
            'SUM', '(', 'distance', '*', 'shipment', '*', '90', '/', '1000',
            ')'
        ]
        constraints = [
            {
                "tokens": ['SUM', '(', 'shipment', ')', '<=', 'capacity'],
                "group_by": "plants",
            },
            {
                "tokens": ['SUM', '(', 'shipment', ')', '>=', 'demand'],
                "group_by": "markets",
            },
            {
                "tokens": ['shipment', '*', '100', '>=', 'demand'],
            },
        ]
        obj_func = self.generate_objective(objective, self.result_value_name)
        self.assertEqual(
            obj_func, 'sum([DATA_FRAME["distance"][i_0]*model.x[i_0]*90/1000 '
            'for i_0 in model.x])')
        const_0, range_0, vars_0 = self.generate_constraints(
            constraints[0], self.result_value_name)
        self.assertEqual(
            const_0, 'sum([model.x[i_0] for i_0 in __import__("numpy")'
            '.where(DATA_FRAME["plants"] == __value)[0]])'
            '<=DATA_FRAME["capacity"][__index]')
        self.assertEqual(
            range_0, 'zip(*__import__("numpy").unique(DATA_FRAME["plants"], '
            'return_index=True))')
        self.assertEqual(vars_0, ["__value", "__index"])
        const_1, range_1, vars_1 = self.generate_constraints(
            constraints[1], self.result_value_name)
        self.assertEqual(
            const_1, 'sum([model.x[i_0] for i_0 in __import__("numpy").'
            'where(DATA_FRAME["markets"] == __value)[0]])>='
            'DATA_FRAME["demand"][__index]')
        self.assertEqual(
            range_1, 'zip(*__import__("numpy").unique(DATA_FRAME["markets"], '
            'return_index=True))')
        self.assertEqual(vars_1, ["__value", "__index"])
        const_2, range_2, vars_2 = self.generate_constraints(
            constraints[2], self.result_value_name)
        self.assertEqual(
            const_2, 'model.x[__index]*100>=DATA_FRAME["demand"][__index]')
        self.assertEqual(range_2, 'model.x')
        self.assertEqual(vars_2, ["__index"])
        model = generate_model_with_data_frame(
            data_frame=self.data_frame,
            variables=self.variables,
            variable_type="NonNegativeIntegers",
            result_value_name=self.result_value_name,
            objective=objective,
            direction="minimize",
            constraints=constraints)
        self.assertTrue(isinstance(model, pyomo_env.ConcreteModel))
        result_x, result_y = solve_model(model, 'baron')
        self.assertTrue(
            np.array_equal(result_x, np.array([99, 1, 31, 59], dtype='int64')))
        self.assertAlmostEqual(result_y, 2581.2)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
nadvamir/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
    """
    Display histogram of a TF distribution

    :param title: title string for the matplotlib figure
    :param distribution_tensor: TensorFlow tensor of sampled values to plot
    :param hist_range: (min, max) x-range used to build the histogram bins
    """
    with tf.Session() as sess:
        values = sess.run(distribution_tensor)

    plt.title(title)
    # Use floor division: np.linspace requires an integer `num`, and
    # `len(values) / 2` is a float under Python 3 (TypeError).
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
    plt.show()
def _get_loss_acc(dataset, weights):
    """
    Get losses and validation accuracy of example neural network

    :param dataset: dataset object exposing .train / .validation splits
        (next_batch, num_examples, labels, images) — TF MNIST-style API
    :param weights: list of three tf.Variable weight matrices for the
        input->256, 256->128 and 128->num_classes layers
    :return: (loss_batch, valid_acc) — per-batch training losses and the
        final validation accuracy
    """
    batch_size = 128
    epochs = 2
    learning_rate = 0.001
    features = tf.placeholder(tf.float32)
    labels = tf.placeholder(tf.float32)
    learn_rate = tf.placeholder(tf.float32)
    # Biases start at zero; only the weight init under test varies.
    biases = [
        tf.Variable(tf.zeros([256])),
        tf.Variable(tf.zeros([128])),
        tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
    ]
    # Layers
    layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
    layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
    logits = tf.matmul(layer_2, weights[2]) + biases[2]
    # Training loss
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    # Optimizer
    optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
    # Accuracy
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Measurements use for graphing loss
    loss_batch = []
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        batch_count = int((dataset.train.num_examples / batch_size))
        # The training cycle
        for epoch_i in range(epochs):
            for batch_i in range(batch_count):
                batch_features, batch_labels = dataset.train.next_batch(batch_size)
                # Run optimizer and get loss
                session.run(
                    optimizer,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                l = session.run(
                    loss,
                    feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
                loss_batch.append(l)
        valid_acc = session.run(
            accuracy,
            feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
    # Hack to Reset batches so repeated calls see the same batch order
    dataset.train._index_in_epoch = 0
    dataset.train._epochs_completed = 0
    return loss_batch, valid_acc
def compare_init_weights(
        dataset,
        title,
        weight_init_list,
        plot_n_batches=100):
    """
    Plot loss and print stats of weights using an example neural network

    :param dataset: dataset with .train/.validation splits (see _get_loss_acc)
    :param title: title for the loss plot
    :param weight_init_list: list of (weights, label) pairs to compare;
        at most 6 entries (one plot color each)
    :param plot_n_batches: number of leading batches to show in the plot
    """
    colors = ['r', 'b', 'g', 'c', 'y', 'k']
    label_accs = []
    label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
    # Train once per initialization and overlay the loss curves.
    for i, (weights, label) in enumerate(weight_init_list):
        loss, val_acc = _get_loss_acc(dataset, weights)
        plt.plot(loss[:plot_n_batches], colors[i], label=label)
        label_accs.append((label, val_acc))
        label_loss.append((label, loss[-1]))
    plt.title(title)
    plt.xlabel('Batches')
    plt.ylabel('Loss')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.show()
    print('After 858 Batches (2 Epochs):')
    print('Validation Accuracy')
    for label, val_acc in label_accs:
        print(' {:7.3f}% -- {}'.format(val_acc*100, label))
    print('Loss')
    for label, loss in label_loss:
        print(' {:7.3f} -- {}'.format(loss, label))
| mit |
jviada/QuantEcon.py | examples/illustrates_lln.py | 7 | 1802 | """
Filename: illustrates_lln.py
Authors: John Stachurski and Thomas J. Sargent
Visual illustration of the law of large numbers.
"""
import random
import numpy as np
from scipy.stats import t, beta, lognorm, expon, gamma, poisson
import matplotlib.pyplot as plt

n = 100

# == Arbitrary collection of distributions == #
distributions = {"student's t with 10 degrees of freedom": t(10),
                 "beta(2, 2)": beta(2, 2),
                 "lognormal LN(0, 1/2)": lognorm(0.5),
                 "gamma(5, 1/2)": gamma(5, scale=2),
                 "poisson(4)": poisson(4),
                 "exponential with lambda = 1": expon(1)}

# == Create a figure and some axes == #
num_plots = 3
fig, axes = plt.subplots(num_plots, 1, figsize=(10, 10))

# == Set some plotting parameters to improve layout == #
bbox = (0., 1.02, 1., .102)
legend_args = {'ncol': 2,
               'bbox_to_anchor': bbox,
               'loc': 3,
               'mode': 'expand'}
plt.subplots_adjust(hspace=0.5)

for ax in axes:
    # == Choose a randomly selected distribution (pop so each panel
    #    gets a different one) == #
    name = random.choice(list(distributions.keys()))
    distribution = distributions.pop(name)

    # == Generate n draws from the distribution == #
    data = distribution.rvs(n)

    # == Compute sample mean at each n, vectorized: == #
    # sample_mean[i] == np.mean(data[:i+1]) via cumulative sums,
    # replacing the original O(n^2) Python loop.
    sample_mean = np.cumsum(data) / np.arange(1, n + 1)

    # == Plot == #
    ax.plot(list(range(n)), data, 'o', color='grey', alpha=0.5)
    axlabel = r'$\bar X_n$' + ' for ' + r'$X_i \sim$' + ' ' + name
    ax.plot(list(range(n)), sample_mean, 'g-', lw=3, alpha=0.6, label=axlabel)
    m = distribution.mean()
    ax.plot(list(range(n)), [m] * n, 'k--', lw=1.5, label=r'$\mu$')
    ax.vlines(list(range(n)), m, data, lw=0.2)
    ax.legend(**legend_args)

plt.show()
| bsd-3-clause |
sightmachine/SimpleCV | SimpleCV/MachineLearning/ShapeContextClassifier.py | 12 | 6161 | from SimpleCV.base import *
from SimpleCV.Features.Features import Feature, FeatureSet
from SimpleCV.Color import Color
from SimpleCV.ImageClass import Image
from SimpleCV.Features.Detection import ShapeContextDescriptor
import math
import scipy.stats as sps
"""
Classify an object based on shape context
"""
class ShapeContextClassifier():
    """Nearest-neighbor classifier over shape-context descriptors.

    For each labeled model image a KNN index of its shape-context
    descriptors is precomputed; classification matches a query image's
    descriptors against each model and ranks models by mean match
    distance (lower is better).
    """
    def __init__(self,images,labels):
        """
        Create a shape context classifier.

        * *images* - a list of input binary images where the things
          to be detected are white.

        * *labels* - the names of each class of objects, parallel to *images*.
        """
        # The below import has been done in init since it throws "Need scikits learn installed" for $import SimpleCV
        try:
            from sklearn import neighbors
        except:
            print "Need scikits learn installed"
        self.imgMap = {}
        self.ptMap = {}
        self.descMap = {}
        self.knnMap = {}
        self.blobCount = {}
        self.labels = labels
        self.images = images
        import warnings
        warnings.simplefilter("ignore")
        # Precompute sample points, descriptors and a fitted KNN index per label.
        for i in range(0,len(images)):
            print "precomputing " + images[i].filename
            self.imgMap[labels[i]] = images[i]
            pts,desc,count = self._image2FeatureVector(images[i])
            self.blobCount[labels[i]] = count
            self.ptMap[labels[i]] = pts
            self.descMap[labels[i]] = desc
            knn = neighbors.KNeighborsClassifier()
            knn.fit(desc,range(0,len(pts)))
            self.knnMap[labels[i]] = knn
    def _image2FeatureVector(self,img):
        """
        generate a list of points, SC descriptors, and the count of blobs
        found in *img* (blobs smaller than 50px are ignored)
        """
        #IMAGES MUST BE WHITE ON BLACK!
        fulllist = []
        raw_descriptors = []
        blobs = img.findBlobs(minsize=50)
        count = 0
        if( blobs is not None ):
            count = len(blobs)
            for b in blobs:
                fulllist += b._filterSCPoints()
            raw_descriptors = blobs[0]._generateSC(fulllist)
        return fulllist,raw_descriptors,count
    def _getMatch(self,model_scd,test_scd):
        # NOTE(review): this method looks broken and unused — `distances`
        # is undefined (the local is named `distance`), and _doMatching
        # expects a model *name*, not a descriptor list. Calling it would
        # raise a NameError; confirm before removing.
        correspondence,distance = self._doMatching(model_scd,test_scd)
        return self._matchQuality(distances)
    def _doMatching(self,model_name,test_scd):
        """For each test descriptor, find its nearest model descriptor via
        the precomputed KNN index and record the Euclidean distance.
        Returns [otherIdx, distance]; otherIdx is currently always empty."""
        myPts = len(test_scd)
        otPts = len(self.ptMap[model_name])
        # some magic metric that keeps features
        # with a lot of points from dominating
        #metric = 1.0 + np.log10( np.max([myPts,otPts])/np.min([myPts,otPts])) # <-- this could be moved to after the sum
        otherIdx = []
        distance = []
        import warnings
        warnings.simplefilter("ignore")
        results = []
        for sample in test_scd:
            best = self.knnMap[model_name].predict(sample)
            idx = best[0] # this is where we can play with k
            scd = self.descMap[model_name][idx]
            temp = np.sqrt(np.sum(((sample-scd)**2)))
            #temp = 0.5*np.sum((sample-scd)**2)/np.sum((sample+scd))
            if( math.isnan(temp) ):
                temp = sys.maxint
            distance.append(temp)
        return [otherIdx,distance]
    def _matchQuality(self,distances):
        """Summarize a list of per-descriptor distances as (mean, std)."""
        #distances = np.array(distances)
        #sd = np.std(distances)
        #x = np.mean(distances)
        #min = np.min(distances)
        # not sure trimmed mean is perfect
        # realistically we should have some bimodal dist
        # and we want to throw away stuff with awful matches
        # so long as the number of points is not a huge
        # chunk of our points.
        #tmean = sps.tmean(distances,(min,x+sd))
        tmean = np.mean(distances)
        std = np.std(distances)
        return tmean,std
    def _buildMatchDict(self,image, countBlobs):
        """Match *image* against every model; returns the image's points,
        descriptors, blob count, and {label: mean distance}/{label: std}
        dicts. With countBlobs=True only models with the same blob count
        as the query are considered."""
        # we may want to base the count on the num best_matchesber of large blobs
        points,descriptors,count = self._image2FeatureVector(image)
        matchDict = {}
        matchStd = {}
        for key,value in self.descMap.items():
            if( countBlobs and self.blobCount[key] == count ): # only do matching for similar number of blobs
                #need to hold on to correspondences
                correspondence, distances = self._doMatching(key,descriptors)
                result,std = self._matchQuality(distances)
                matchDict[key] = result
                matchStd[key] = std
            elif( not countBlobs ):
                correspondence, distances = self._doMatching(key,descriptors)
                result,std = self._matchQuality(distances)
                matchDict[key] = result
                matchStd[key] = std
        return points,descriptors,count,matchDict, matchStd
    def classify(self,image, blobFilter=True):
        """
        Classify an input image.

        * *image* - the input binary image.
        * *blobFilter* - Do a first pass where you only match objects
          that have the same number of blobs - speeds up computation
          and match quality.

        Returns (best_label, best_distance, matchDict, matchStd); the label
        is "No Match" when no model qualified for matching.
        """
        points,descriptors,count,matchDict,matchStd = self._buildMatchDict(image, blobFilter)
        best = sys.maxint
        best_name = "No Match"
        for k,v in matchDict.items():
            if ( v < best ):
                best = v
                best_name = k
        return best_name, best, matchDict, matchStd
    def getTopNMatches(self,image,n=3, blobFilter = True):
        """
        Classify an input image and return the top n results.

        * *image* - the input binary image.
        * *n* - the number of results to return (clamped to [1, #labels]).
        * *blobFilter* - Do a first pass where you only match objects
          that have the same number of blobs - speeds up computation
          and match quality.

        Returns ([(label, distance), ...] sorted best-first, matchDict,
        matchStd).
        """
        n = np.clip(n,1,len(self.labels))
        points,descriptors,count,matchDict,matchStd = self._buildMatchDict(image,blobFilter)
        best_matches = list(sorted(matchDict, key=matchDict.__getitem__))
        retList = []
        for k in best_matches:
            retList.append((k,matchDict[k]))
        return retList[0:n], matchDict, matchStd
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_find_ecg_artifacts.py | 14 | 1313 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt

import mne
from mne import io
from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()

###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)

# Arbitrary event code assigned to each detected ECG (QRS) event.
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
                                                     ch_name='MEG 1531')

# Read epochs: a 200 ms window (-0.1 s to +0.1 s) around each QRS event,
# from the single MEG channel used for detection.
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
                       include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
                    proj=False)
data = epochs.get_data()

print("Number of detected ECG artifacts : %d" % len(data))

###############################################################################
# Plot ECG artifacts (times converted from seconds to milliseconds)
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
chen0510566/MissionPlanner | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
    """
    Return a periodic sawtooth waveform.

    The waveform has period 2*pi: it rises linearly from -1 to 1 over
    [0, width*2*pi) and falls linearly from 1 back to -1 over
    [width*2*pi, 2*pi). `width` must lie in [0, 1]; any other value
    yields NaN output.

    Parameters
    ----------
    t : array_like
        Time.
    width : float, optional
        Width of the rising ramp as a fraction of the period. Default is 1.

    Returns
    -------
    y : ndarray
        Output array containing the sawtooth waveform.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0, 20*np.pi, 500)
    >>> plt.plot(x, sp.signal.sawtooth(x))
    """
    t, w = asarray(t), asarray(width)
    # Broadcast each argument against the other so both share one shape.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # NOTE(review): 'fFdD' is a single 4-char list element, so dtype.char
    # (always one char) never matches and the output is always float64 —
    # kept as-is to preserve behavior.
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # Widths outside [0, 1] produce NaN.
    invalid = (w > 1) | (w < 0)
    place(y, invalid, nan)

    # Fold the time axis into a single period [0, 2*pi).
    phase = mod(t, 2 * pi)

    # Rising segment: phase/(pi*width) - 1 climbs from -1 to 1.
    rising = ~invalid & (phase < w * 2 * pi)
    place(y, rising,
          extract(rising, phase) / (pi * extract(rising, w)) - 1)

    # Falling segment: drops from 1 back to -1 over the rest of the period.
    falling = ~invalid & ~rising
    place(y, falling,
          (pi * (extract(falling, w) + 1) - extract(falling, phase))
          / (pi * (1 - extract(falling, w))))
    return y
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
    and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1];
    any other value yields NaN output.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : float, optional
        Duty cycle.  Default is 0.5.

    Returns
    -------
    y : array_like
        The output square wave.

    """
    t, w = asarray(t), asarray(duty)
    # Broadcast each argument against the other so both share one shape.
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    # NOTE(review): 'fFdD' is a single 4-char list element, so dtype.char
    # (always one char) never matches and the output is always float64 —
    # kept as-is to preserve behavior.
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'
    y = zeros(t.shape, ytype)

    # duty must be between 0 and 1 inclusive
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # take t modulo 2*pi
    tmod = mod(t, 2 * pi)

    # On the interval 0 to duty*2*pi the function is +1.
    # (The original also extracted tmod/w values here and in the branch
    # below, but never used them — dead code removed.)
    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # On the interval duty*2*pi to 2*pi the function is -1.
    mask3 = (1 - mask1) & (1 - mask2)
    place(y, mask3, -1)
    return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
    """
    Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).

    If `retquad` is True, then return the real and imaginary parts
    (in-phase and quadrature).
    If `retenv` is True, then return the envelope (unmodulated signal).
    Otherwise, return the real part of the modulated sinusoid.

    Parameters
    ----------
    t : ndarray, or the string 'cutoff'
        Input array.
    fc : int, optional
        Center frequency (Hz).  Default is 1000.
    bw : float, optional
        Fractional bandwidth in frequency domain of pulse (Hz).
        Default is 0.5.
    bwr : float, optional
        Reference level at which fractional bandwidth is calculated (dB).
        Default is -6.
    tpr : float, optional
        If `t` is 'cutoff', then the function returns the cutoff
        time for when the pulse amplitude falls below `tpr` (in dB).
        Default is -60.
    retquad : bool, optional
        If True, return the quadrature (imaginary) as well as the real part
        of the signal.  Default is False.
    retenv : bool, optional
        If True, return the envelope of the signal.  Default is False.

    Returns
    -------
    yI : ndarray
        Real part of the signal.  Always returned.
    yQ : ndarray
        Imaginary (quadrature) part.  Only returned if `retquad` is True.
    yenv : ndarray
        Envelope of the signal.  Only returned if `retenv` is True.

    Raises
    ------
    ValueError
        If `fc` is negative, `bw` is not positive, or `bwr`/`tpr` is not
        below 0 dB.
    """
    if fc < 0:
        raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
    if bw <= 0:
        raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
    if bwr >= 0:
        raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
                         "be < 0 dB" % bwr)

    # exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
    ref = pow(10.0, bwr / 20.0)
    # fdel = fc*bw/2:  g(fdel) = ref --- solve this for a
    #
    # pi^2/a * fc^2 * bw^2 /4=-log(ref)
    a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))

    # Fix: test for the string sentinel explicitly.  The original
    # `t == 'cutoff'` triggers element-wise comparison when `t` is an
    # ndarray, which is unreliable across NumPy versions.
    if isinstance(t, str):
        if t != 'cutoff':
            raise ValueError("If `t` is a string, it must be 'cutoff'")
        # Solve exp(-a tc**2) = tref for tc, where tref = 10^(tpr/20).
        if tpr >= 0:
            raise ValueError("Reference level for time cutoff must be < 0 dB")
        tref = pow(10.0, tpr / 20.0)
        return sqrt(-log(tref) / a)

    yenv = exp(-a * t * t)
    yI = yenv * cos(2 * pi * fc * t)
    yQ = yenv * sin(2 * pi * fc * t)
    if not retquad and not retenv:
        return yI
    if not retquad and retenv:
        return yI, yenv
    if retquad and not retenv:
        return yI, yQ
    if retquad and retenv:
        return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
    """Frequency-swept cosine generator.

    Throughout, 'Hz' should be read as 'cycles per time unit'; no
    particular time unit is assumed.  The important distinction is that
    the units of rotation are cycles, not radians.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    f0 : float
        Frequency (in Hz) at time t=0.
    t1 : float
        Time at which `f1` is specified.
    f1 : float
        Frequency (in Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
        Kind of frequency sweep; 'linear' if not given.  The shorter
        aliases listed in the Notes are also accepted.
    phi : float, optional
        Phase offset, in degrees.  Default is 0.
    vertex_zero : bool, optional
        Only used when `method` is 'quadratic'.  Chooses whether the
        vertex of the parabolic frequency profile lies at t=0 (True,
        the default) or at t=t1.

    Returns
    -------
    A numpy array containing the signal evaluated at 't' with the
    requested time-varying frequency, i.e. ``cos(phase + (pi/180)*phi)``
    where `phase` is the integral from 0 to t of ``2*pi*f(t)``, with
    ``f(t)`` as defined in the Notes.

    See Also
    --------
    scipy.signal.waveforms.sweep_poly

    Notes
    -----
    The instantaneous frequency f(t) produced for each `method` (short
    aliases in parentheses) is:

    linear (lin, li):
        ``f(t) = f0 + (f1 - f0) * t / t1``
    quadratic (quad, q):
        The graph of f(t) is a parabola through (0, f0) and (t1, f1).
        With `vertex_zero` True the vertex is at (0, f0):
        ``f(t) = f0 + (f1 - f0) * t**2 / t1**2``; otherwise it is at
        (t1, f1): ``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``.
        For a more general polynomial sweep use
        `scipy.signal.waveforms.sweep_poly`.
    logarithmic (log, lo):
        ``f(t) = f0 * (f1/f0)**(t/t1)``.  f0 and f1 must be nonzero and
        share the same sign.  Also known as a geometric or exponential
        chirp.
    hyperbolic (hyp):
        ``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``.  Requires
        f0 > f1 > 0.
    """
    # The phase itself is produced by _chirp_phase so it can be tested
    # independently of the cosine evaluation.
    swept_phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
    # Callers supply `phi` in degrees; the cosine needs radians.
    phi_rad = phi * (pi / 180)
    return cos(swept_phase + phi_rad)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
    """Frequency-swept cosine generator with a polynomial frequency law.

    Generates a sinusoid whose instantaneous frequency at time `t` is
    given by evaluating the polynomial `poly` at `t`.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    poly : 1D ndarray (or array-like), or instance of numpy.poly1d
        The desired frequency expressed as a polynomial.  When `poly` is
        a list or ndarray of length n, its entries are the polynomial
        coefficients and the instantaneous frequency is
        ``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``.
        When `poly` is a numpy.poly1d instance the instantaneous
        frequency is simply ``f(t) = poly(t)``.
    phi : float, optional
        Phase offset, in degrees.  Default is 0.

    Returns
    -------
    A numpy array containing the signal evaluated at 't' with the
    requested time-varying frequency, i.e. ``cos(phase + (pi/180)*phi)``
    where `phase` is the integral from 0 to t of ``2 * pi * f(t)`` with
    ``f(t)`` as defined above.

    See Also
    --------
    scipy.signal.waveforms.chirp

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    # Phase computation lives in _sweep_poly_phase so it can be tested
    # on its own.
    polynomial_phase = _sweep_poly_phase(t, poly)
    # Convert the degree-valued offset to radians before use.
    phase_offset = phi * (pi / 180)
    return cos(polynomial_phase + phase_offset)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
| gpl-3.0 |
ZENGXH/scikit-learn | sklearn/grid_search.py | 103 | 36232 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.
    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.
        An empty dict signifies default parameters.
        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.
    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True
    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True
    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """
    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid
    def __iter__(self):
        """Iterate over the points in the grid.
        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                # An empty dict means "use the estimator's defaults":
                # yield a single empty parameter setting.
                yield {}
            else:
                keys, values = zip(*items)
                # itertools.product enumerates every combination of the
                # per-parameter value lists, in sorted-key order.
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params
    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.product can't).
        product = partial(reduce, operator.mul)
        # An empty sub-grid still contributes exactly one candidate
        # (the default parameters), hence the `if p else 1`.
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)
    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration
        Parameters
        ----------
        ind : int
            The iteration index
        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        # Walk the sub-grids in order, treating `ind` as an offset into
        # the concatenated iteration sequence of all sub-grids.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                # Empty sub-grid occupies exactly one slot (the default
                # parameters); consume it or move past it.
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                continue
            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            total = np.product(sizes)
            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                # Decode `ind` as a mixed-radix number whose digits index
                # into each parameter's value list; this reproduces the
                # ordering that __iter__ produces.
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out
        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.
    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
    a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.
    n_iter : integer
        Number of parameter settings that are produced.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.
    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
    def __iter__(self):
        # check if all distributions are given as lists
        # in this case we want to sample without replacement
        all_lists = np.all([not hasattr(v, "rvs")
                            for v in self.param_distributions.values()])
        rnd = check_random_state(self.random_state)
        if all_lists:
            # look up sampled parameter settings in parameter grid
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)
            # Without-replacement sampling is only possible when the grid
            # has at least n_iter distinct candidates.
            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            for i in sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rnd):
                yield param_grid[i]
        else:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(self.param_distributions.items())
            for _ in six.moves.range(self.n_iter):
                params = dict()
                for k, v in items:
                    if hasattr(v, "rvs"):
                        # Distribution object: sample with replacement.
                        # NOTE: scipy.stats draws from the global numpy RNG,
                        # so `random_state` does not control this branch
                        # (see class docstring).
                        params[k] = v.rvs()
                    else:
                        # Plain list: sample uniformly using `rnd`.
                        params[k] = v[rnd.randint(len(v))]
                yield params
    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Run fit on one set of parameters.
    Evaluates a single grid point: clones `estimator`, sets `parameters`
    on it, fits on the training split and scores on the test split.
    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.
    y : array-like or None
        Targets for input data.
    estimator : estimator object
        This estimator will be cloned and then fitted.
    parameters : dict
        Parameters to be set on estimator for this grid point.
    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.
    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.
    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    verbose : int
        Verbosity level.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.
    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.
    parameters : dict
        The parameters that have been evaluated.
    n_samples_test : int
        Number of test samples in this split.
    """
    # All of the real work happens in the shared _fit_and_score helper;
    # this function just reshapes its result into the documented triple.
    fold_results = _fit_and_score(estimator, X, y, scorer, train, test,
                                  verbose, parameters, fit_params,
                                  error_score)
    score, n_samples_test, _ = fold_results
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""
    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator's type (classifier/regressor) so
        # meta-utilities treat the search object like the estimator itself.
        return self.estimator._estimator_type
    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit
        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        score : float
        Notes
        -----
         * The long-standing behavior of this method changed in version 0.16.
         * It no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.
        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)
    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict(X)
    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_proba(X)
    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_log_proba(X)
    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.decision_function(X)
    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.
        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.transform(X)
    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.
        Only available if the underlying estimator implements ``inverse_transform`` and
        ``refit=True``.
        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        # Bug fix: this previously delegated to ``transform``, silently
        # applying the forward transformation instead of the inverse.
        return self.best_estimator_.inverse_transform(Xt)
    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""
        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)
        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))
        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch
        # Evaluate every (parameter setting, CV fold) pair; folds for a given
        # setting are contiguous in `out`, which the aggregation below relies on.
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv)
        # Out is a list of triplet: score, estimator, n_test_samples
        n_fits = len(out)
        n_folds = len(cv)
        scores = list()
        grid_scores = list()
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # Weight each fold's score by its test-set size so the
                    # aggregate is a per-sample mean, not a per-fold mean.
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores
        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score
        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.
    Important members are fit, predict.
    GridSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        A object of that type is instantiated for each grid point.
    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default 1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : integer or cross-validation generator, default=3
        If an integer is passed, it is the number of folds.
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)
    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:
            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a an hyperparameter grid.
    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        # All common search options are handled by BaseSearchCV; only
        # `param_grid` is specific to the exhaustive grid search.
        super(GridSearchCV, self).__init__(
            estimator, scoring, fit_params, n_jobs, iid,
            refit, cv, verbose, pre_dispatch, error_score)
        self.param_grid = param_grid
        # Fail fast at construction time on malformed grids (non-sequence
        # values, empty value lists, multi-dimensional arrays).
        _check_param_grid(param_grid)
    def fit(self, X, y=None):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        # Delegate to BaseSearchCV._fit with the full cartesian grid of
        # candidate parameter settings.
        return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
A object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting(and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settins, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
| bsd-3-clause |
sanja7s/SR_Twitter | src_CAPITAL/BI_assortatvitity.py | 1 | 2426 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
analyze assortativity of the graphs in terms of sentiment
'''
from igraph import *
import networkx as nx
import os
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import matplotlib.cm as cm
from collections import defaultdict
import matplotlib
import pandas as pd
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
sns.set_style('whitegrid')
import pandas as pd
from scipy import stats, integrate
f_in_user_labels = "usr_num_CVs.tab"
##################
f_in_user_taxons = "user_taxons.tab"
f_in_user_concepts = "user_concepts.tab"
f_in_user_entities = "user_entities.tab"
f_in_num_tweets = "usr_num_tweets.tab"
#########################
#
f_in_user_sentiment = "user_sentiment.tab"
# mention graph
#########################
f_in_graph = "threshold_mention_graphs/directed_threshold0.tab"
f_in_graph_weights = "threshold_mention_graphs/mention_graph_weights.dat"
f_out_sent_mention_graph = "directed_threshold0_sent_val.tab"
IN_DIR = "../../../DATA/CAPITAL/"
f_out_mention = "sentiment_assortativity_mention_2.txt"
#########################
f_in_graph_weights = "mention_graph_weights.dat"
os.chdir(IN_DIR)
#########################
# read from a file that is an edge list with weights
#########################
def read_in_graph():
	"""Load the weighted mention graph from an edge-list file.

	Builds an igraph Graph from f_in_graph_weights: directed, with edge
	weights and vertex names taken from the NCOL file.
	"""
	# for mention and convultion it is directed
	G = Graph.Read_Ncol(f_in_graph_weights, names=True, directed=True, weights=True)
	# for reciprocal it is undirected
	#G = Graph.Read_Ncol(f_in_graph, directed=False, weights=True)
	# NOTE(review): prints f_in_graph although the file actually read is
	# f_in_graph_weights -- possibly a leftover; confirm intent.
	print f_in_graph
	return G
def read_BI():
	"""Read the Bridging-Index table into a pandas DataFrame."""
	# Whitespace-delimited table with an 'id' and a 'bi' column.
	bi_table = pd.read_csv('BI_indexR_full.txt',
		encoding='utf-8', delim_whitespace=True)
	return bi_table
def BI_capital_assort():
	"""Compute assortativity of the mention graph w.r.t. the BI index.

	Loads the BI table and the mention graph, annotates each vertex
	with its (capped) BI value, prunes vertices without a BI value,
	reduces the graph to mutual (reciprocal) edges, drops isolates and
	prints the undirected assortativity coefficient.
	"""
	bi = read_BI()
	print max(bi['bi']), min(bi['bi'])
	G = read_in_graph()
	# Map user id -> BI value for vertex annotation.
	bidict = bi.set_index('id')['bi'].to_dict()
	for el in bidict:
		# Cap BI at 1 before assigning it to the vertex.
		if bidict[el] > 1:
			bidict[el] = 1
		v = G.vs.select(name = str(el))
		print el, v
		v["bi"] = bidict[el]
	# Remove vertices that never received a BI value.
	to_delete_vertices = [v.index for v in G.vs if v["bi"] == None]
	print len(to_delete_vertices)
	G.delete_vertices(to_delete_vertices)
	summary(G)
	# Keep only reciprocal mentions, then drop newly isolated vertices.
	G.to_undirected(mode='mutual')
	not_connected_nodes = G.vs(_degree_eq=0)
	to_delete_vertices = not_connected_nodes
	print len(to_delete_vertices)
	G.delete_vertices(to_delete_vertices)
	summary(G)
	print "UNDIR BI assortativity is %f " % (G.assortativity("bi",directed=False))
| mit |
multivac61/pv | pv.py | 1 | 4658 | import argparse
from sys import float_info
from math import ceil
import numpy as np
from numpy.fft import fft, ifft
from scipy.io import wavfile
import matplotlib.pyplot as plt
def stretch(x, alpha, window, num_channels, synthesis_hop_factor):
    """
    Time-stretch signal x by a factor alpha using a phase-vocoder loop.

    x: input signal, alpha: time-stretch factor,
    window: analysis/synthesis window of length num_channels,
    num_channels: FFT size, synthesis_hop_factor: window overlap divisor.
    Returns the stretched signal.
    """
    hop_out = ceil(num_channels / synthesis_hop_factor)
    hop_in = ceil(hop_out / alpha)
    # TODO: Should be able to completely reconstruct input signal if alpha == 1
    x = np.pad(x, (hop_out,))
    y = np.zeros(max(x.size, ceil(x.size * alpha + x.size / hop_in * alpha)))
    # Seed the running spectrum with epsilon to avoid division by zero
    # on the first iteration.
    prev_spectrum = float_info.epsilon
    pos_in = pos_out = 0
    limit = x.size - (hop_out + num_channels)
    while pos_in <= limit:
        # Spectra of two consecutive windowed frames.
        frame_a = fft(window * x[pos_in:pos_in + num_channels])
        frame_b = fft(window * x[pos_in + hop_out:pos_in + hop_out + num_channels])
        # Propagate phase, keep unit magnitude of the correction factor.
        ratio = prev_spectrum / frame_a
        out_spectrum = frame_b * ratio / abs(ratio)
        prev_spectrum = out_spectrum
        # IFFT, window again, and overlap-add into the output buffer.
        segment = y[pos_out:pos_out + num_channels]
        segment = np.add(segment, window * np.real_if_close(ifft(out_spectrum)),
                         out=segment, casting='unsafe')
        pos_in += hop_in
        pos_out += hop_out
    # TODO: AM scaling due to window sliding
    return y[hop_out: ceil(x.size * alpha)]
def sin_signal(fs, duration, f0):
    """
    Generate a pure sine tone.

    fs: sample frequency, duration: length in seconds, f0: tone frequency
    returns: ndarray of int(duration * fs) samples covering [0, duration)
    """
    t = np.linspace(0, duration, int(duration * fs), endpoint=False)
    return np.sin(2 * np.pi * f0 * t)
def normalize(x):
    """
    Normalize signal from (min(x), max(x)) to (-1, 1)
    see: https://github.com/WeAreROLI/JUCE/blob/master/modules/juce_core/maths/juce_MathsFunctions.h#L127

    A constant signal has zero peak-to-peak range; the unguarded
    expression would divide by zero and return NaN/inf, so that case
    now maps to all zeros (the midpoint of the target range).
    """
    span = x.ptp()
    if span == 0:
        return np.zeros_like(x, dtype=float)
    return -1 + 2 * (x - x.min()) / span
if __name__ == '__main__':
    # Available window functions, selectable from the command line.
    windows = {'bartlett': np.bartlett, 'blackman': np.blackman, 'hamming': np.hamming, 'hanning': np.hanning,
               'kaiser': np.kaiser}
    parser = argparse.ArgumentParser(description='Phase Vocoder')
    parser.add_argument('--input_filename', type=str, help='Input filename')
    parser.add_argument('--test_duration', type=float, default=1.0, help='Test sin duration')
    parser.add_argument('--test_frequency', type=float, default=440.0, help='Test sin frequency')
    parser.add_argument('--test_sampling_frequency', type=int, default=44100, help='Test sin sampling frequency')
    parser.add_argument('--stretch_factor', type=float, default=2, help='Stretch factor')
    parser.add_argument('--num_channels', type=int, default=1024, help='Number of FFT channels')
    parser.add_argument('--synthesis_hop_factor', type=float, default=4, help='Synthesis hop factor')
    parser.add_argument('--window', choices=windows.keys(), default='hanning', help='Window function')
    parser.add_argument('--window_size', type=int, default=1024, help='Window size')
    parser.add_argument('--generate_figures', action='store_true', help='Should generate figures')
    parser.add_argument('--output_filename', type=str, default='output.wav', help='Output filename')
    args = parser.parse_args()
    # BUG FIX: the original one-line conditional unpacking parsed as
    #   (sf, data), _ = (wavfile.read(...) if ... else fs, sin_signal(...))
    # so the no-input-file branch tried to unpack an int into two names
    # and always raised TypeError. Use an explicit branch instead.
    if args.input_filename:
        sampling_frequency, input_data = wavfile.read(args.input_filename)
    else:
        sampling_frequency = args.test_sampling_frequency
        input_data = sin_signal(args.test_sampling_frequency, args.test_duration, args.test_frequency)
    input_data = normalize(input_data)
    # NOTE(review): `window + [0]` broadcasts an elementwise +0 over the
    # ndarray (a no-op). If appending a trailing zero sample was intended,
    # np.append would be needed -- but that would break the length match
    # with num_channels, so the no-op behaviour is preserved here.
    symmetric_window = windows[args.window](args.window_size) + [0]  # symmetric about (size - 1) / 2
    output_data = stretch(input_data,
                          args.stretch_factor,
                          symmetric_window,
                          args.num_channels,
                          args.synthesis_hop_factor)
    output_data = normalize(output_data)
    wavfile.write(args.output_filename, int(sampling_frequency), output_data)
    if args.generate_figures:
        plt.subplot(2, 1, 1)
        plt.plot(np.arange(input_data.size) / sampling_frequency, input_data)
        plt.title('Input')
        plt.xlabel('Time (s)')
        plt.ylabel('Amplitude')
        plt.subplot(2, 1, 2)
        plt.plot(np.arange(output_data.size) / sampling_frequency, output_data)
        plt.title('Output')
        plt.xlabel('Time (s)')
        plt.ylabel('Amplitude')
plt.show() | mit |
blaze/dask | dask/dataframe/io/tests/test_io.py | 3 | 22234 | import numpy as np
import pandas as pd
import pytest
from threading import Lock
from multiprocessing.pool import ThreadPool
import dask.array as da
import dask.dataframe as dd
from dask.dataframe._compat import tm
from dask.dataframe.io.io import _meta_from_array
from dask.delayed import Delayed, delayed
from dask.utils import tmpfile
from dask.dataframe.utils import assert_eq, is_categorical_dtype
####################
# Arrays and BColz #
####################
def test_meta_from_array():
    """_meta_from_array derives an empty pandas meta object from a 2-D ndarray."""
    # Integer array without names -> DataFrame with positional columns.
    x = np.array([[1, 2], [3, 4]], dtype=np.int64)
    res = _meta_from_array(x)
    assert isinstance(res, pd.DataFrame)
    assert res[0].dtype == np.int64
    assert res[1].dtype == np.int64
    tm.assert_index_equal(res.columns, pd.Index([0, 1]))
    # Explicit column names are honoured and dtypes preserved.
    x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
    res = _meta_from_array(x, columns=["a", "b"])
    assert isinstance(res, pd.DataFrame)
    assert res["a"].dtype == np.float64
    assert res["b"].dtype == np.float64
    tm.assert_index_equal(res.columns, pd.Index(["a", "b"]))
    # More names than array columns must raise.
    with pytest.raises(ValueError):
        _meta_from_array(x, columns=["a", "b", "c"])
    # Partitioning: 201 rows at chunksize 50 -> 5 partitions (6 divisions).
    np.random.seed(42)
    x = np.random.rand(201, 2)
    x = dd.from_array(x, chunksize=50, columns=["a", "b"])
    assert len(x.divisions) == 6  # Should be 5 partitions and the end
def test_meta_from_1darray():
x = np.array([1.0, 2.0, 3.0], dtype=np.float64)
res = _meta_from_array(x)
assert isinstance(res, pd.Series)
assert res.dtype == np.float64
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns="x")
assert isinstance(res, pd.Series)
assert res.name == "x"
assert res.dtype == np.object_
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns=["x"])
assert isinstance(res, pd.DataFrame)
assert res["x"].dtype == np.object_
tm.assert_index_equal(res.columns, pd.Index(["x"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b"])
def test_meta_from_recarray():
x = np.array(
[(i, i * 10) for i in range(10)], dtype=[("a", np.float64), ("b", np.int64)]
)
res = _meta_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(["a", "b"]))
res = _meta_from_array(x, columns=["b", "a"])
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(["b", "a"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b", "c"])
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list("abc"))
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index(["a", "b", "c"]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
with pytest.raises(ValueError):
dd.from_array(np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i * 10) for i in range(10)], dtype=[("a", "i4"), ("b", "i4")])
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert list(d.columns) == ["a", "b"]
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip("bcolz")
pool = ThreadPool(processes=5)
def check(i):
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert is_categorical_dtype(d.dtypes["a"])
assert list(d.x.compute(scheduler="sync")) == [1, 2, 3]
assert list(d.a.compute(scheduler="sync")) == ["a", "b", "a"]
d = dd.from_bcolz(t, chunksize=2, index="x")
L = list(d.index.compute(scheduler="sync"))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(
dd.from_bcolz(t, chunksize=2).dask
)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(
dd.from_bcolz(t, chunksize=3).dask
)
pool.map(check, range(5))
def test_from_bcolz():
bcolz = pytest.importorskip("bcolz")
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert is_categorical_dtype(d.dtypes["a"])
assert list(d.x.compute(scheduler="sync")) == [1, 2, 3]
assert list(d.a.compute(scheduler="sync")) == ["a", "b", "a"]
L = list(d.index.compute(scheduler="sync"))
assert L == [0, 1, 2]
d = dd.from_bcolz(t, chunksize=2, index="x")
L = list(d.index.compute(scheduler="sync"))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(
dd.from_bcolz(t, chunksize=2).dask
)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(
dd.from_bcolz(t, chunksize=3).dask
)
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4.0, "b"))
t.flush()
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(dsk)
def test_from_bcolz_no_lock():
bcolz = pytest.importorskip("bcolz")
locktype = type(Lock())
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"], chunklen=2
)
a = dd.from_bcolz(t, chunksize=2)
b = dd.from_bcolz(t, chunksize=2, lock=True)
c = dd.from_bcolz(t, chunksize=2, lock=False)
assert_eq(a, b)
assert_eq(a, c)
assert not any(isinstance(item, locktype) for v in c.dask.values() for item in v)
def test_from_bcolz_filename():
bcolz = pytest.importorskip("bcolz")
with tmpfile(".bcolz") as fn:
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]],
names=["x", "y", "a"],
rootdir=fn,
)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip("bcolz")
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().columns) == ["x", "y", "a"]
def test_from_pandas_dataframe():
a = list("aaaaaaabbbbbbbbccccccc")
df = pd.DataFrame(
dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start="20120101", periods=len(a)),
)
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert isinstance(ddf.divisions[0], type(df.index[0]))
tm.assert_frame_equal(df, ddf.compute())
ddf = dd.from_pandas(df, chunksize=8)
msg = "Exactly one of npartitions and chunksize must be specified."
with pytest.raises(ValueError) as err:
dd.from_pandas(df, npartitions=2, chunksize=2)
assert msg in str(err.value)
with pytest.raises((ValueError, AssertionError)) as err:
dd.from_pandas(df)
assert msg in str(err.value)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert isinstance(ddf.divisions[0], type(df.index[0]))
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({"x": [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
a = dd.from_pandas(df, chunksize=i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
for sort in [True, False]:
for i in [0, 2]:
df = pd.DataFrame({"x": [0] * i})
ddf = dd.from_pandas(df, npartitions=5, sort=sort)
assert_eq(df, ddf)
s = pd.Series([0] * i, name="x", dtype=int)
ds = dd.from_pandas(s, npartitions=5, sort=sort)
assert_eq(s, ds)
@pytest.mark.parametrize("n", [1, 2, 4, 5])
def test_from_pandas_npartitions_is_accurate(n):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
assert dd.from_pandas(df, npartitions=n).npartitions <= n
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n), index=pd.date_range(start="20120101", periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
ds = dd.from_pandas(s, chunksize=8)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
ddf = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2, sort=False)
assert not ddf.known_divisions
assert_eq(df, ddf)
def test_from_pandas_single_row():
    """A one-row frame yields a single partition with degenerate divisions."""
    frame = pd.DataFrame({"x": [1]}, index=[1])
    dframe = dd.from_pandas(frame, npartitions=1)
    assert dframe.divisions == (1, 1)
    assert_eq(dframe, frame)
def test_from_pandas_with_datetime_index():
df = pd.DataFrame(
{
"Date": [
"2015-08-28",
"2015-08-27",
"2015-08-26",
"2015-08-25",
"2015-08-24",
"2015-08-21",
"2015-08-20",
"2015-08-19",
"2015-08-18",
],
"Val": list(range(9)),
}
)
df.Date = df.Date.astype("datetime64[ns]")
ddf = dd.from_pandas(df, 2)
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2)
assert_eq(df, ddf)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = dd.from_dask_array(x, ["a", "b", "c"])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df.columns, pd.Index(["a", "b", "c"]))
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(scheduler="sync").values == x.compute(scheduler="sync")).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=["a", "b", "c"])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df2.columns, df.columns)
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = dd.from_dask_array(x, "a")
assert isinstance(ser, dd.Series)
assert ser.name == "a"
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(scheduler="sync").values == x.compute(scheduler="sync")).all()
ser = dd.from_dask_array(x)
assert isinstance(ser, dd.Series)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert isinstance(ser2, dd.Series)
assert_eq(ser, ser2)
@pytest.mark.parametrize("as_frame", [True, False])
def test_from_dask_array_index(as_frame):
s = dd.from_pandas(pd.Series(range(10), index=list("abcdefghij")), npartitions=3)
if as_frame:
s = s.to_frame()
result = dd.from_dask_array(s.values, index=s.index)
assert_eq(s, result)
def test_from_dask_array_index_raises():
x = da.random.uniform(size=(10,), chunks=(5,))
with pytest.raises(ValueError) as m:
dd.from_dask_array(x, index=pd.Index(np.arange(10)))
assert m.match("must be an instance")
a = dd.from_pandas(pd.Series(range(12)), npartitions=2)
b = dd.from_pandas(pd.Series(range(12)), npartitions=4)
with pytest.raises(ValueError) as m:
dd.from_dask_array(a.values, index=b.index)
assert m.match("index")
assert m.match("number")
assert m.match("blocks")
assert m.match("4 != 2")
def test_from_dask_array_compat_numpy_array():
x = da.ones((3, 3, 3), chunks=2)
with pytest.raises(ValueError):
dd.from_dask_array(x) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute()) # numpy
x = da.ones((10, 3), chunks=(3, 3))
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))
with pytest.raises(ValueError):
dd.from_dask_array(x, columns=["a"]) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute(), columns=["a"]) # numpy
d1 = dd.from_dask_array(x, columns=["a", "b", "c"]) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(["a", "b", "c"]))
d2 = dd.from_array(x.compute(), columns=["a", "b", "c"]) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(["a", "b", "c"]))
def test_from_dask_array_compat_numpy_array_1d():
x = da.ones(10, chunks=3)
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name is None
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name is None
d1 = dd.from_dask_array(x, columns="name") # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name == "name"
d2 = dd.from_array(x.compute(), columns="name") # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name == "name"
# passing list via columns results in DataFrame
d1 = dd.from_dask_array(x, columns=["name"]) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(["name"]))
d2 = dd.from_array(x.compute(), columns=["name"]) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(["name"]))
def test_from_dask_array_struct_dtype():
x = np.array([(1, "a"), (2, "b")], dtype=[("a", "i4"), ("b", "object")])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
assert_eq(df, pd.DataFrame(x))
assert_eq(
dd.from_dask_array(y, columns=["b", "a"]), pd.DataFrame(x, columns=["b", "a"])
)
def test_from_dask_array_unknown_chunks():
# Series
dx = da.Array(
{("x", 0): np.arange(5), ("x", 1): np.arange(5, 11)},
"x",
((np.nan, np.nan),),
np.arange(1).dtype,
)
df = dd.from_dask_array(dx)
assert isinstance(df, dd.Series)
assert not df.known_divisions
assert_eq(df, pd.Series(np.arange(11)), check_index=False)
# DataFrame
dsk = {("x", 0, 0): np.random.random((2, 3)), ("x", 1, 0): np.random.random((5, 3))}
dx = da.Array(dsk, "x", ((np.nan, np.nan), (3,)), np.float64)
df = dd.from_dask_array(dx)
assert isinstance(df, dd.DataFrame)
assert not df.known_divisions
assert_eq(df, pd.DataFrame(dx.compute()), check_index=False)
# Unknown width
dx = da.Array(dsk, "x", ((np.nan, np.nan), (np.nan,)), np.float64)
with pytest.raises(ValueError):
df = dd.from_dask_array(dx)
def test_to_bag():
pytest.importorskip("dask.bag")
a = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute() == list(a.itertuples(False))
assert ddf.to_bag(True).compute() == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute() == list(a.x.iteritems())
assert ddf.x.to_bag().compute() == list(a.x)
def test_to_records():
pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(df.to_records(), ddf.to_records())
@pytest.mark.parametrize("lengths", [[2, 2], True])
def test_to_records_with_lengths(lengths):
pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
result = ddf.to_records(lengths=lengths)
assert_eq(df.to_records(), result)
assert isinstance(result, da.Array)
expected_chunks = ((2, 2),)
assert result.chunks == expected_chunks
def test_to_records_raises():
    """to_records must reject `lengths` values inconsistent with the frame."""
    pytest.importorskip("dask.array")
    df = pd.DataFrame(
        {"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
        index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
    )
    ddf = dd.from_pandas(df, 2)
    # Wrong number of partition lengths: 3 given, but ddf has 2 partitions.
    # (The original placed `pytest.fail(...)` after the raising call inside
    # the `pytest.raises` block; those lines were unreachable dead code
    # and have been removed.)
    with pytest.raises(ValueError):
        ddf.to_records(lengths=[2, 2, 2])
    # A bare scalar is not an accepted `lengths` value.
    with pytest.raises(ValueError):
        ddf.to_records(lengths=5)
def test_from_delayed():
df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list("abcd"))
parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]
dfs = [delayed(parts.__getitem__)(i) for i in range(4)]
meta = dfs[0].compute()
my_len = lambda x: pd.Series([len(x)])
for divisions in [None, [0, 1, 3, 6, 10]]:
ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)
assert_eq(ddf, df)
assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
s = dd.from_delayed([d.a for d in dfs], meta=meta.a, divisions=divisions)
assert_eq(s, df.a)
assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
meta2 = [(c, "f8") for c in df.columns]
assert_eq(dd.from_delayed(dfs, meta=meta2), df)
assert_eq(dd.from_delayed([d.a for d in dfs], meta=("a", "f8")), df.a)
with pytest.raises(ValueError):
dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])
with pytest.raises(ValueError) as e:
dd.from_delayed(dfs, meta=meta.a).compute()
assert str(e.value).startswith("Metadata mismatch found in `from_delayed`")
def test_from_delayed_misordered_meta():
df = pd.DataFrame(
columns=["(1)", "(2)", "date", "ent", "val"],
data=[range(i * 5, i * 5 + 5) for i in range(3)],
index=range(3),
)
# meta with different order for columns
misordered_meta = pd.DataFrame(
columns=["date", "ent", "val", "(1)", "(2)"], data=[range(5)]
)
ddf = dd.from_delayed([delayed(lambda: df)()], meta=misordered_meta)
with pytest.raises(ValueError) as info:
# produces dataframe which does not match meta
ddf.reset_index().compute(scheduler="sync")
msg = (
"The columns in the computed data do not match the columns in the"
" provided metadata"
)
assert msg in str(info.value)
def test_from_delayed_sorted():
a = pd.DataFrame({"x": [1, 2]}, index=[1, 10])
b = pd.DataFrame({"x": [4, 1]}, index=[100, 200])
A = dd.from_delayed([delayed(a), delayed(b)], divisions="sorted")
assert A.known_divisions
assert A.divisions == (1, 100, 200)
def test_to_delayed():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
# Frame
a, b = ddf.to_delayed()
assert isinstance(a, Delayed)
assert isinstance(b, Delayed)
assert_eq(a.compute(), df.iloc[:2])
# Scalar
x = ddf.x.sum()
dx = x.to_delayed()
assert isinstance(dx, Delayed)
assert_eq(dx.compute(), x)
def test_to_delayed_optimize_graph():
df = pd.DataFrame({"x": list(range(20))})
ddf = dd.from_pandas(df, npartitions=20)
ddf2 = (ddf + 1).loc[:2]
# Frame
d = ddf2.to_delayed()[0]
assert len(d.dask) < 20
d2 = ddf2.to_delayed(optimize_graph=False)[0]
assert sorted(d2.dask) == sorted(ddf2.dask)
assert_eq(ddf2.get_partition(0), d.compute())
assert_eq(ddf2.get_partition(0), d2.compute())
# Scalar
x = ddf2.x.sum()
dx = x.to_delayed()
dx2 = x.to_delayed(optimize_graph=False)
assert len(dx.dask) < len(dx2.dask)
assert_eq(dx.compute(), dx2.compute())
def test_from_dask_array_index_dtype():
x = da.ones((10,), chunks=(5,))
df = pd.DataFrame(
{
"date": pd.date_range("2019-01-01", periods=10, freq="1T"),
"val1": list(range(10)),
}
)
ddf = dd.from_pandas(df, npartitions=2).set_index("date")
ddf2 = dd.from_dask_array(x, index=ddf.index, columns="val2")
assert ddf.index.dtype == ddf2.index.dtype
assert ddf.index.name == ddf2.index.name
df = pd.DataFrame({"idx": np.arange(0, 1, 0.1), "val1": list(range(10))})
ddf = dd.from_pandas(df, npartitions=2).set_index("idx")
ddf2 = dd.from_dask_array(x, index=ddf.index, columns="val2")
assert ddf.index.dtype == ddf2.index.dtype
assert ddf.index.name == ddf2.index.name
| bsd-3-clause |
hep-cce/ml_classification_studies | cosmoDNN/GAN/GANlens.py | 2 | 10732 | from __future__ import print_function
import keras
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, merge, Reshape
from keras import backend as K
import numpy as np
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D, Convolution2D
from keras.optimizers import SGD, RMSprop, Adadelta, Adam
from keras.utils import np_utils
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
# --- TensorFlow session configuration ---------------------------------------
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # claim GPU memory on demand, not up front
session = tf.Session(config=config)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
#sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# NOTE(review): this immediately closes the session created just above, so the
# allow_growth configuration is discarded -- confirm this is intentional.
if 'session' in locals() and session is not None:
    print('Close interactive session')
    session.close()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
#print(tf.__version__)
print(keras.__version__)
import os
# NOTE(review): keras was already imported at the top of the file, so setting
# KERAS_BACKEND here presumably has no effect on the backend in use -- verify.
os.environ["KERAS_BACKEND"] = "theano"
import time
import glob
K.set_image_dim_ordering('tf')  # channels-last image tensors (H, W, C)
time_i = time.time()  # wall-clock start; elapsed time printed at end of script
from keras.preprocessing.image import ImageDataGenerator

# --- Hyper-parameters and data locations ------------------------------------
data_augmentation = True
batch_size = 32
num_classes = 2  # two discriminator classes (columns of the one-hot labels)
num_epoch = 1000 #2000
pre_train_size = 200  # number of real samples used to pre-train the discriminator
filter_factor = 40  # base filter count for the generator
learning_rate_gen = 1e-4 # Warning: lr and decay vary across optimizers
learning_rate_dis = 1e-5 # Warning: lr and decay vary across optimizers
learning_rate_gan = 1e-5 # Warning: lr and decay vary across optimizers
decay_rate = 0.1
dropout_rate = 0.25
opti_id = 1 # [SGD, Adam, Adadelta, RMSprop]
loss_id = 0 # [mse, mae] # mse is always better
Dir0 = '../../../'
Dir1 = Dir0 + 'AllTrainTestSets/Encoder/'
#Dir0 = './'
#Dir1 = '/home/nes/Desktop/ConvNetData/lens/Encoder/'
Dir2 = ['single/', 'stack/'][1]
data_path = Dir1 + Dir2
DirOutType = ['noisy0', 'noisy1', 'noiseless'] # check above too
image_size = img_rows = img_cols = 45
num_channel = 1
num_files = 9000
train_split = 0.8 # 80 percent
num_train = int(train_split*num_files)
# Tag embedded in every output filename so runs with different settings don't clash.
fileOut = 'GAN' + str(opti_id) + '_loss' + str(loss_id) + '_lrGen_' + str(learning_rate_gen)+ '_lrDis_' + str(learning_rate_dis) + '_lrGAN_' +str(learning_rate_gan) + '_decay' + str(decay_rate) + '_batch' + str(batch_size) + '_epoch' + str(num_epoch)
def load_train(fnames):
    """Load up to ``num_files`` .npy images from a directory, labelled as class 1.

    Args:
        fnames (str): directory containing the ``*.npy`` image files.

    Returns:
        tuple: ``(X_train, y_train)`` where ``X_train`` has shape
        (n, 28, 28, 1) after cropping/expansion and ``y_train`` is one-hot
        with every sample assigned class 1.
    """
    img_data_list = []
    filelist = sorted(glob.glob(fnames + '/*npy'))[:num_files]
    for fileIn in filelist: # restricting #files now [:num_files]
        # print(fileIn)
        img_data = np.load(fileIn)
        # Crop every image to its top-left 28x28 corner.
        img_data = img_data[:28, :28] # comment later
        # print(fileIn)
        ravelTrue = False  # flattening disabled
        if ravelTrue: img_data = np.ravel(np.array(img_data))
        img_data = img_data.astype('float32')
        img_data /= 255.  # scale pixel values into [0, 1]
        expandTrue = True  # add a trailing channel axis -> (28, 28, 1)
        if expandTrue: img_data = np.expand_dims(img_data, axis=4)
        # print (img_data.shape)
        img_data_list.append(img_data)
    # print(np.array(img_data_list).shape)
    X_train = np.array(img_data_list)
    # labels = np.load(fnames +'_5para.npy')[:num_files]
    print (X_train.shape)
    # All loaded images get the same label (class 1 = "real").
    labels = np.ones([X_train.shape[0], ])
    y_train = np_utils.to_categorical(labels, num_classes)
    np.random.seed(12345)
    # Shuffle is currently disabled, so this ordering is the identity.
    shuffleOrder = np.arange(X_train.shape[0])
    # np.random.shuffle(shuffleOrder)
    # print(shuffleOrder)
    X_train = X_train[shuffleOrder]
    y_train = y_train[shuffleOrder]
    return X_train, y_train
def make_trainable(net, val):
    """Toggle trainability of a model and every one of its layers.

    Args:
        net: model object exposing ``trainable`` and an iterable ``layers``.
        val (bool): flag written to the model and to each layer.
    """
    for component in [net] + list(net.layers):
        component.trainable = val
# Load the noiseless stacked images and split 80/20 into train/validation.
fnames = data_path + DirOutType[2] #'noiseless'
noiseless_data, noiseless_target = load_train(fnames)

x_train = noiseless_data[0:num_train]
y_train = noiseless_target[0:num_train]
x_val = noiseless_data[num_train:num_files]
y_val = noiseless_target[num_train:num_files]

print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
# Build Generative model ...
def Generator():
    """Build and compile the generator: 100-d uniform noise -> 28x28x1 image.

    Returns:
        Compiled keras Model with sigmoid output (pixel values in [0, 1]).
    """
    # filter_factor = 40
    g_input = Input(shape=[100])
    # Project the latent vector to a 14x14 feature map stack.
    H = Dense( filter_factor*14*14, init='glorot_normal')(g_input)
    # H = BatchNormalization(mode=2)(H) # Commented by Nesar
    H = BatchNormalization()(H)
    H = Activation('relu')(H)
    H = Reshape( [14, 14, int(filter_factor)] )(H)
    H = UpSampling2D(size=(2, 2))(H)  # 14x14 -> 28x28
    # NOTE(review): the Keras-1 Convolution2D signature is
    # (nb_filter, nb_row, nb_col): as written this builds 3 filters with a
    # 3 x filter_factor/2 kernel. Confirm whether int(filter_factor/2)
    # filters of size 3x3 was intended instead.
    H = Convolution2D(3, 3, int(filter_factor/2), border_mode='same', init='glorot_uniform')(H)
    # H = BatchNormalization(mode=2)(H) # Commented by Nesar
    H = BatchNormalization()(H)
    H = Activation('relu')(H)
    # NOTE(review): same argument-order concern as above.
    H = Convolution2D(3, 3, int(filter_factor/4), border_mode='same', init='glorot_uniform')(H)
    # H = BatchNormalization(mode=2)(H) # Commented by Nesar
    H = BatchNormalization()(H)
    H = Activation('relu')(H)
    # Collapse to a single output channel.
    H = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
    g_V = Activation('sigmoid')(H)
    generator = Model(g_input,g_V)
    opt_gen = Adam(lr=learning_rate_gen, decay=decay_rate)
    generator.compile(loss='binary_crossentropy', optimizer=opt_gen)
    # generator.summary()
    return generator
# Build Discriminative model
def Discriminator():
    """Build and compile the discriminator: image -> 2-class softmax.

    Column 1 of the softmax output corresponds to "real", column 0 to
    "generated" (see the label construction in the training loop).

    Returns:
        Compiled keras Model.
    """
    d_input = Input(shape=x_train.shape[1:])
    # NOTE(review): Keras-1 Convolution2D takes (nb_filter, nb_row, nb_col);
    # as written this is 5 filters with a 5x256 kernel -- confirm whether
    # 256 filters of 5x5 was intended. Also note activation='relu' here is
    # immediately followed by a LeakyReLU layer (double activation) -- verify.
    H = Convolution2D(5, 5, 256, subsample=(2, 2), border_mode = 'same', activation='relu')(d_input)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    # NOTE(review): same argument-order / double-activation concerns as above.
    H = Convolution2D(5, 5, 512, subsample=(2, 2), border_mode = 'same', activation='relu')(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    H = Flatten()(H)
    H = Dense(256)(H)
    H = LeakyReLU(0.2)(H)
    H = Dropout(dropout_rate)(H)
    d_V = Dense(2,activation='softmax')(H)
    discriminator = Model(d_input,d_V)
    opt_dis = Adam(lr=learning_rate_dis, decay=decay_rate)
    discriminator.compile(loss='categorical_crossentropy', optimizer= opt_dis)
    discriminator.summary()
    return discriminator
generator = Generator()
discriminator = Discriminator()

# Freeze weights in the discriminator for stacked training
make_trainable(discriminator, False)

# Build stacked GAN model: noise -> generator -> (frozen) discriminator.
gan_input = Input(shape=[100])
H = generator(gan_input)
gan_V = discriminator(H)
GAN = Model(gan_input, gan_V)
opt_gan = Adam(lr=learning_rate_gan, decay=decay_rate)
GAN.compile(loss='categorical_crossentropy', optimizer= opt_gan)
GAN.summary()
####################################################################################################
# Pre-training
# Give the discriminator a head start: fit it for one epoch on a small mix of
# real and generated images before adversarial training begins.
PreTrain = True
if PreTrain:
    ntrain = pre_train_size
    np.random.seed(123)  # reproducible sample selection
    trainidx = np.random.randint(0, x_train.shape[0], size = ntrain)
    x_train_selected = x_train[trainidx, :, :, :]

    # Pre-train the discriminator network ...
    noise_gen = np.random.uniform(0, 1, size=[x_train_selected.shape[0], 100])
    generated_images = generator.predict(noise_gen)
    x_stacked = np.concatenate((x_train_selected, generated_images))
    n = x_train_selected.shape[0]
    # One-hot labels: column 1 = real images, column 0 = generated images.
    y_init = np.zeros([2 * n, 2])
    y_init[:n, 1] = 1
    y_init[n:, 0] = 1

    make_trainable(discriminator, True)
    discriminator.fit( x_stacked, y_init, nb_epoch=1, batch_size=32)
    y_hat = discriminator.predict(x_stacked)

    # Report the discriminator's accuracy on the pre-training set.
    y_hat_idx = np.argmax(y_hat, axis=1)
    y_idx = np.argmax(y_init, axis=1)
    diff = y_idx - y_hat_idx
    n_tot = y_init.shape[0]
    n_rig = (diff == 0).sum()
    acc = n_rig * 100.0 / n_tot
    print("Accuracy: %0.02f pct (%d of %d) right" % (acc, n_rig, n_tot))
def plot_loss(losses):
    """Save a figure of the discriminator and generator loss histories.

    Args:
        losses (dict): maps 'dis' and 'gen' to lists of per-step losses.
    """
    plt.figure(figsize=(7, 5))
    for key, label in (("dis", "discriminative loss"), ("gen", "generative loss")):
        plt.plot(losses[key], label=label)
    plt.legend()
    plt.savefig('plots/loss' + fileOut + '.pdf')
def plot_gen(n_ex=16, dim=(4, 4), figsize=(8, 8)):
    """Sample images from the generator and save them as a grid figure.

    Args:
        n_ex (int): number of latent samples to draw.
        dim (tuple): subplot grid shape (rows, cols).
        figsize (tuple): matplotlib figure size.
    """
    latent = np.random.uniform(0, 1, size=[n_ex, 100])
    images = generator.predict(latent)

    plt.figure(figsize=figsize)
    for idx in range(images.shape[0]):
        plt.subplot(dim[0], dim[1], idx + 1)
        plt.imshow(images[idx, :, :, 0])
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('plots/generated_images' + fileOut + '.pdf')
# Per-step loss history for the discriminator ("dis") and generator ("gen").
losses = {"dis":[], "gen":[]}
def train_for_n(nb_epoch=20, plt_frq=20, BATCH_SIZE=16):
    """Alternate discriminator and stacked-GAN updates for nb_epoch steps.

    Appends each step's losses to the module-level ``losses`` dict and
    refreshes the diagnostic figures every ``plt_frq`` steps.

    Args:
        nb_epoch (int): number of training steps (one batch each).
        plt_frq (int): plotting period in steps.
        BATCH_SIZE (int): number of real (and of generated) images per step.
    """
    for e in (range(nb_epoch)):
        if (e%100==0): print("epoch: %d" %e)

        # Make generative images
        image_batch = x_train[np.random.randint(0, x_train.shape[0], size=BATCH_SIZE), :, :, :]
        noise_gen = np.random.uniform(0, 1, size=[BATCH_SIZE, 100])
        generated_images = generator.predict(noise_gen)

        # Train discriminator on generated images
        # Labels: column 1 = real, column 0 = generated.
        X = np.concatenate((image_batch, generated_images))
        y = np.zeros([2 * BATCH_SIZE, 2])
        y[0:BATCH_SIZE, 1] = 1
        y[BATCH_SIZE:, 0] = 1

        make_trainable(discriminator, True)
        dis_loss = discriminator.train_on_batch(X, y)
        losses["dis"].append(dis_loss)

        # train Generator-Discriminator stack on input noise to non-generated output class
        noise_tr = np.random.uniform(0, 1, size=[BATCH_SIZE, 100])
        y2 = np.zeros([BATCH_SIZE, 2])
        y2[:, 1] = 1  # generator wants its output classified as "real"

        make_trainable(discriminator, False)
        gen_loss = GAN.train_on_batch(noise_tr, y2)
        losses["gen"].append(gen_loss)

        # Updates plots
        if e % plt_frq == plt_frq - 1:
            plot_loss(losses)
            plot_gen()
# K.set_value(opt.lr, 1e-5)
# K.set_value(dopt.lr, 1e-6)
# Run the adversarial training loop, then save the final diagnostic figures.
train_for_n(nb_epoch= num_epoch, plt_frq= num_epoch, BATCH_SIZE=batch_size)
plot_gen(16, (4,4), (8,8))
plot_loss(losses)
def plot_real(n_ex=16, dim=(4, 4), figsize=(8, 8)):
    """Save a grid figure of n_ex randomly chosen real training images.

    Args:
        n_ex (int): number of training images to sample.
        dim (tuple): subplot grid shape (rows, cols).
        figsize (tuple): matplotlib figure size.
    """
    picks = np.random.randint(0, x_train.shape[0], n_ex)
    images = x_train[picks, :, :, :]

    plt.figure(figsize=figsize)
    for k in range(images.shape[0]):
        plt.subplot(dim[0], dim[1], k + 1)
        plt.imshow(images[k, :, :, 0])
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('plots/real_images' + fileOut + '.pdf')
plot_real()
# plt.show()

# Persist the trained sub-models and the stacked GAN for later reuse.
generator.save('ModelOutGAN/GANGenerate_' + fileOut + '.hdf5')
discriminator.save('ModelOutGAN/GANdiscriminate_' + fileOut + '.hdf5')
GAN.save('ModelOutGAN/GAN_' + fileOut + '.hdf5')
# np.save('ModelOutEncode/Generate' + fileOut + '.npy', training_hist)
print (50 * '-')
time_j = time.time()
print(time_j - time_i, 'seconds')  # total wall-clock run time
print (50 * '-')
| gpl-3.0 |
kiyoto/statsmodels | statsmodels/tsa/tests/test_arima.py | 9 | 89483 | import warnings
from statsmodels.compat.python import lrange, BytesIO
import numpy as np
from nose.tools import nottest
from numpy.testing import (assert_almost_equal, assert_, assert_allclose,
assert_raises, dec, TestCase)
from statsmodels.tools.testing import assert_equal
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.arma_mle import Arma
from statsmodels.tsa.arima_model import ARMA, ARIMA
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.base.datetools import dates_from_range
from .results import results_arma, results_arima
import os
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.datasets.macrodata import load as load_macrodata
from statsmodels.datasets.macrodata import load_pandas as load_macrodata_pandas
import pandas
# matplotlib is an optional dependency: plotting tests are skipped when it is
# unavailable. Catch Exception (not a bare except) so SystemExit and
# KeyboardInterrupt still propagate, while any import-time failure of
# matplotlib keeps this best-effort behavior.
try:
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    have_matplotlib = False
# Comparison precisions (decimal places) shared by the mixins below; subclasses
# loosen individual ``decimal_*`` attributes where reference values diverge.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1

current_path = os.path.dirname(os.path.abspath(__file__))
# Twelve simulated reference series, one column per ARMA specification.
y_arma = np.genfromtxt(open(current_path + '/results/y_arma_data.csv', "rb"),
                       delimiter=",", skip_header=1, dtype=float)

# Quarterly CPI and annual sunspot date ranges used by the prediction tests.
cpi_dates = dates_from_range('1959Q1', '2009Q3')
cpi_predict_dates = dates_from_range('2009Q3', '2015Q4')
sun_dates = dates_from_range('1700', '2008')
sun_predict_dates = dates_from_range('2008', '2033')

from pandas import DatetimeIndex # pylint: disable-msg=E0611
cpi_dates = DatetimeIndex(cpi_dates, freq='infer')
sun_dates = DatetimeIndex(sun_dates, freq='infer')
cpi_predict_dates = DatetimeIndex(cpi_predict_dates, freq='infer')
sun_predict_dates = DatetimeIndex(sun_predict_dates, freq='infer')
def test_compare_arma():
    """Cross-check Kalman-filter ARMA against conditional LS and MLE fits."""
    #this is a preliminary test to compare arma_kf, arma_cond_ls and arma_cond_mle
    #the results returned by the fit methods are incomplete
    #for now without random.seed
    np.random.seed(9876565)
    # Simulate an ARMA(1,1) sample with a long burn-in.
    x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(nsample=200,
                                                             burnin=1000)

    # this used kalman filter through descriptive
    #d = ARMA(x)
    #d.fit((1,1), trend='nc')
    #dres = d.res
    modkf = ARMA(x, (1,1))
    ##rkf = mkf.fit((1,1))
    ##rkf.params
    reskf = modkf.fit(trend='nc', disp=-1)
    dres = reskf

    modc = Arma(x)
    resls = modc.fit(order=(1,1))
    rescm = modc.fit_mle(order=(1,1), start_params=[0.4,0.4, 1.], disp=0)

    #decimal 1 corresponds to threshold of 5% difference
    #still different sign corrcted
    #assert_almost_equal(np.abs(resls[0] / d.params), np.ones(d.params.shape), decimal=1)
    assert_almost_equal(resls[0] / dres.params, np.ones(dres.params.shape),
                        decimal=1)
    #rescm also contains variance estimate as last element of params
    #assert_almost_equal(np.abs(rescm.params[:-1] / d.params), np.ones(d.params.shape), decimal=1)
    assert_almost_equal(rescm.params[:-1] / dres.params,
                        np.ones(dres.params.shape), decimal=1)
    #return resls[0], d.params, rescm.params
class CheckArmaResultsMixin(object):
    """
    res2 are the results from gretl. They are in results/results_arma.
    res1 are from statsmodels

    Each ``decimal_*`` class attribute is the precision (decimal places) used
    by the matching test method; subclasses override individual attributes to
    loosen tolerances where platforms or reference packages disagree.
    """
    decimal_params = DECIMAL_4
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                            self.decimal_params)

    decimal_aic = DECIMAL_4
    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)

    decimal_bic = DECIMAL_4
    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)

    decimal_arroots = DECIMAL_4
    def test_arroots(self):
        assert_almost_equal(self.res1.arroots, self.res2.arroots,
                            self.decimal_arroots)

    decimal_maroots = DECIMAL_4
    def test_maroots(self):
        assert_almost_equal(self.res1.maroots, self.res2.maroots,
                            self.decimal_maroots)

    decimal_bse = DECIMAL_2
    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)

    decimal_cov_params = DECIMAL_4
    def test_covparams(self):
        assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
                            self.decimal_cov_params)

    decimal_hqic = DECIMAL_4
    def test_hqic(self):
        assert_almost_equal(self.res1.hqic, self.res2.hqic, self.decimal_hqic)

    decimal_llf = DECIMAL_4
    def test_llf(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_llf)

    decimal_resid = DECIMAL_4
    def test_resid(self):
        assert_almost_equal(self.res1.resid, self.res2.resid,
                            self.decimal_resid)

    decimal_fittedvalues = DECIMAL_4
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                            self.decimal_fittedvalues)

    decimal_pvalues = DECIMAL_2
    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
                            self.decimal_pvalues)

    decimal_t = DECIMAL_2 # only 2 decimal places in gretl output
    def test_tvalues(self):
        assert_almost_equal(self.res1.tvalues, self.res2.tvalues,
                            self.decimal_t)

    decimal_sigma2 = DECIMAL_4
    def test_sigma2(self):
        assert_almost_equal(self.res1.sigma2, self.res2.sigma2,
                            self.decimal_sigma2)

    def test_summary(self):
        # smoke tests
        table = self.res1.summary()
class CheckForecastMixin(object):
    """Mixin comparing out-of-sample forecasts and their standard errors
    against the reference results stored on ``res2``."""

    decimal_forecast = DECIMAL_4

    def test_forecast(self):
        expected = self.res2.forecast
        assert_almost_equal(self.res1.forecast_res, expected,
                            self.decimal_forecast)

    decimal_forecasterr = DECIMAL_4

    def test_forecasterr(self):
        expected = self.res2.forecasterr
        assert_almost_equal(self.res1.forecast_err, expected,
                            self.decimal_forecasterr)
class CheckDynamicForecastMixin(object):
    """Mixin comparing dynamic (recursive) forecasts against reference
    results stored on ``res2``."""

    decimal_forecast_dyn = 4

    def test_dynamic_forecast(self):
        got = self.res1.forecast_res_dyn
        want = self.res2.forecast_dyn
        assert_almost_equal(got, want, self.decimal_forecast_dyn)
class CheckArimaResultsMixin(CheckArmaResultsMixin):
    """Extends the ARMA checks with ARIMA order and level-prediction tests."""

    def test_order(self):
        expected = (self.res2.k_diff, self.res2.k_ar, self.res2.k_ma)
        actual = (self.res1.k_diff, self.res1.k_ar, self.res1.k_ma)
        assert actual == expected

    decimal_predict_levels = DECIMAL_4

    def test_predict_levels(self):
        levels = self.res1.predict(typ='levels')
        assert_almost_equal(levels, self.res2.linear,
                            self.decimal_predict_levels)
class Test_Y_ARMA11_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(1,1), no constant, exact MLE; reference values from gretl."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,0]
        cls.res1 = ARMA(endog, order=(1,1)).fit(trend='nc', disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma11()

    def test_pickle(self):
        # Round-trip the wrapped results through save/load, check the type.
        fh = BytesIO()
        #test wrapped results load save pickle
        self.res1.save(fh)
        fh.seek(0,0)
        res_unpickled = self.res1.__class__.load(fh)
        assert_(type(res_unpickled) is type(self.res1))


class Test_Y_ARMA14_NoConst(CheckArmaResultsMixin):
    """ARMA(1,4), no constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,1]
        cls.res1 = ARMA(endog, order=(1,4)).fit(trend='nc', disp=-1)
        cls.res2 = results_arma.Y_arma14()


@dec.slow
class Test_Y_ARMA41_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(4,1), no constant, exact MLE (slow)."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,2]
        cls.res1 = ARMA(endog, order=(4,1)).fit(trend='nc', disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma41()
        cls.decimal_maroots = DECIMAL_3  # loosened tolerance for MA roots


class Test_Y_ARMA22_NoConst(CheckArmaResultsMixin):
    """ARMA(2,2), no constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,3]
        cls.res1 = ARMA(endog, order=(2,2)).fit(trend='nc', disp=-1)
        cls.res2 = results_arma.Y_arma22()


class Test_Y_ARMA50_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
    """AR(5), no constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,4]
        cls.res1 = ARMA(endog, order=(5,0)).fit(trend='nc', disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma50()


class Test_Y_ARMA02_NoConst(CheckArmaResultsMixin):
    """MA(2), no constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,5]
        cls.res1 = ARMA(endog, order=(0,2)).fit(trend='nc', disp=-1)
        cls.res2 = results_arma.Y_arma02()
class Test_Y_ARMA11_Const(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(1,1) with constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,6]
        cls.res1 = ARMA(endog, order=(1,1)).fit(trend="c", disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma11c()


class Test_Y_ARMA14_Const(CheckArmaResultsMixin):
    """ARMA(1,4) with constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,7]
        cls.res1 = ARMA(endog, order=(1,4)).fit(trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma14c()


class Test_Y_ARMA41_Const(CheckArmaResultsMixin, CheckForecastMixin):
    """ARMA(4,1) with constant, exact MLE; started at the reference params."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,8]
        cls.res2 = results_arma.Y_arma41c()
        cls.res1 = ARMA(endog, order=(4,1)).fit(trend="c", disp=-1,
                                                start_params=cls.res2.params)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        # Loosened tolerances for this specification.
        cls.decimal_cov_params = DECIMAL_3
        cls.decimal_fittedvalues = DECIMAL_3
        cls.decimal_resid = DECIMAL_3
        cls.decimal_params = DECIMAL_3


class Test_Y_ARMA22_Const(CheckArmaResultsMixin):
    """ARMA(2,2) with constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,9]
        cls.res1 = ARMA(endog, order=(2,2)).fit(trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma22c()


class Test_Y_ARMA50_Const(CheckArmaResultsMixin, CheckForecastMixin):
    """AR(5) with constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,10]
        cls.res1 = ARMA(endog, order=(5,0)).fit(trend="c", disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma50c()


class Test_Y_ARMA02_Const(CheckArmaResultsMixin):
    """MA(2) with constant, exact MLE."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,11]
        cls.res1 = ARMA(endog, order=(0,2)).fit(trend="c", disp=-1)
        cls.res2 = results_arma.Y_arma02c()
# cov_params and tvalues are off still but not as much vs. R
class Test_Y_ARMA11_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(1,1), no constant, conditional sum-of-squares."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,0]
        cls.res1 = ARMA(endog, order=(1,1)).fit(method="css", trend='nc',
                                                disp=-1)
        cls.res2 = results_arma.Y_arma11("css")
        cls.decimal_t = DECIMAL_1


# better vs. R
class Test_Y_ARMA14_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(1,4), no constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,1]
        cls.res1 = ARMA(endog, order=(1,4)).fit(method="css", trend='nc',
                                                disp=-1)
        cls.res2 = results_arma.Y_arma14("css")
        cls.decimal_fittedvalues = DECIMAL_3
        cls.decimal_resid = DECIMAL_3
        cls.decimal_t = DECIMAL_1


# bse, etc. better vs. R
# maroot is off because maparams is off a bit (adjust tolerance?)
class Test_Y_ARMA41_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(4,1), no constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,2]
        cls.res1 = ARMA(endog, order=(4,1)).fit(method="css", trend='nc',
                                                disp=-1)
        cls.res2 = results_arma.Y_arma41("css")
        cls.decimal_t = DECIMAL_1
        cls.decimal_pvalues = 0
        cls.decimal_cov_params = DECIMAL_3
        cls.decimal_maroots = DECIMAL_1


#same notes as above
class Test_Y_ARMA22_NoConst_CSS(CheckArmaResultsMixin):
    """ARMA(2,2), no constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,3]
        cls.res1 = ARMA(endog, order=(2,2)).fit(method="css", trend='nc',
                                                disp=-1)
        cls.res2 = results_arma.Y_arma22("css")
        cls.decimal_t = DECIMAL_1
        cls.decimal_resid = DECIMAL_3
        cls.decimal_pvalues = DECIMAL_1
        cls.decimal_fittedvalues = DECIMAL_3


#NOTE: gretl just uses least squares for AR CSS
# so BIC, etc. is
# -2*res1.llf + np.log(nobs)*(res1.q+res1.p+res1.k)
# with no adjustment for p and no extra sigma estimate
#NOTE: so our tests use x-12 arima results which agree with us and are
# consistent with the rest of the models
class Test_Y_ARMA50_NoConst_CSS(CheckArmaResultsMixin):
    """AR(5), no constant, CSS; compared against x-12-arima."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,4]
        cls.res1 = ARMA(endog, order=(5,0)).fit(method="css", trend='nc',
                                                disp=-1)
        cls.res2 = results_arma.Y_arma50("css")
        cls.decimal_t = 0
        cls.decimal_llf = DECIMAL_1 # looks like rounding error?


class Test_Y_ARMA02_NoConst_CSS(CheckArmaResultsMixin):
    """MA(2), no constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,5]
        cls.res1 = ARMA(endog, order=(0,2)).fit(method="css", trend='nc',
                                                disp=-1)
        cls.res2 = results_arma.Y_arma02("css")
#NOTE: our results are close to --x-12-arima option and R
class Test_Y_ARMA11_Const_CSS(CheckArmaResultsMixin):
    """ARMA(1,1) with constant, conditional sum-of-squares."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,6]
        cls.res1 = ARMA(endog, order=(1,1)).fit(trend="c", method="css",
                                                disp=-1)
        cls.res2 = results_arma.Y_arma11c("css")
        cls.decimal_params = DECIMAL_3
        cls.decimal_cov_params = DECIMAL_3
        cls.decimal_t = DECIMAL_1


class Test_Y_ARMA14_Const_CSS(CheckArmaResultsMixin):
    """ARMA(1,4) with constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,7]
        cls.res1 = ARMA(endog, order=(1,4)).fit(trend="c", method="css",
                                                disp=-1)
        cls.res2 = results_arma.Y_arma14c("css")
        cls.decimal_t = DECIMAL_1
        cls.decimal_pvalues = DECIMAL_1


class Test_Y_ARMA41_Const_CSS(CheckArmaResultsMixin):
    """ARMA(4,1) with constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,8]
        cls.res1 = ARMA(endog, order=(4,1)).fit(trend="c", method="css",
                                                disp=-1)
        cls.res2 = results_arma.Y_arma41c("css")
        cls.decimal_t = DECIMAL_1
        cls.decimal_cov_params = DECIMAL_1
        cls.decimal_maroots = DECIMAL_3
        cls.decimal_bse = DECIMAL_1


class Test_Y_ARMA22_Const_CSS(CheckArmaResultsMixin):
    """ARMA(2,2) with constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,9]
        cls.res1 = ARMA(endog, order=(2,2)).fit(trend="c", method="css",
                                                disp=-1)
        cls.res2 = results_arma.Y_arma22c("css")
        cls.decimal_t = 0
        cls.decimal_pvalues = DECIMAL_1


class Test_Y_ARMA50_Const_CSS(CheckArmaResultsMixin):
    """AR(5) with constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,10]
        cls.res1 = ARMA(endog, order=(5,0)).fit(trend="c", method="css",
                                                disp=-1)
        cls.res2 = results_arma.Y_arma50c("css")
        cls.decimal_t = DECIMAL_1
        cls.decimal_params = DECIMAL_3
        cls.decimal_cov_params = DECIMAL_2


class Test_Y_ARMA02_Const_CSS(CheckArmaResultsMixin):
    """MA(2) with constant, CSS."""
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,11]
        cls.res1 = ARMA(endog, order=(0,2)).fit(trend="c", method="css",
                                                disp=-1)
        cls.res2 = results_arma.Y_arma02c("css")
def test_reset_trend():
    """Refitting the same ARMA model with/without a constant adjusts k_trend."""
    endog = y_arma[:, 0]
    model = ARMA(endog, order=(1, 1))
    with_const = model.fit(trend="c", disp=-1)
    without_const = model.fit(trend="nc", disp=-1)
    assert_equal(len(with_const.params), len(without_const.params) + 1)
@dec.slow
def test_start_params_bug():
    """Smoke test: fitting ARMA(4,1) on this series must not raise.

    Regression test for a bug in automatic start-parameter selection; the
    data values themselves are the fixture and must not be altered.
    """
    data = np.array([1368., 1187, 1090, 1439, 2362, 2783, 2869, 2512, 1804,
    1544, 1028, 869, 1737, 2055, 1947, 1618, 1196, 867, 997, 1862, 2525,
    3250, 4023, 4018, 3585, 3004, 2500, 2441, 2749, 2466, 2157, 1847, 1463,
    1146, 851, 993, 1448, 1719, 1709, 1455, 1950, 1763, 2075, 2343, 3570,
    4690, 3700, 2339, 1679, 1466, 998, 853, 835, 922, 851, 1125, 1299, 1105,
    860, 701, 689, 774, 582, 419, 846, 1132, 902, 1058, 1341, 1551, 1167,
    975, 786, 759, 751, 649, 876, 720, 498, 553, 459, 543, 447, 415, 377,
    373, 324, 320, 306, 259, 220, 342, 558, 825, 994, 1267, 1473, 1601,
    1896, 1890, 2012, 2198, 2393, 2825, 3411, 3406, 2464, 2891, 3685, 3638,
    3746, 3373, 3190, 2681, 2846, 4129, 5054, 5002, 4801, 4934, 4903, 4713,
    4745, 4736, 4622, 4642, 4478, 4510, 4758, 4457, 4356, 4170, 4658, 4546,
    4402, 4183, 3574, 2586, 3326, 3948, 3983, 3997, 4422, 4496, 4276, 3467,
    2753, 2582, 2921, 2768, 2789, 2824, 2482, 2773, 3005, 3641, 3699, 3774,
    3698, 3628, 3180, 3306, 2841, 2014, 1910, 2560, 2980, 3012, 3210, 3457,
    3158, 3344, 3609, 3327, 2913, 2264, 2326, 2596, 2225, 1767, 1190, 792,
    669, 589, 496, 354, 246, 250, 323, 495, 924, 1536, 2081, 2660, 2814, 2992,
    3115, 2962, 2272, 2151, 1889, 1481, 955, 631, 288, 103, 60, 82, 107, 185,
    618, 1526, 2046, 2348, 2584, 2600, 2515, 2345, 2351, 2355, 2409, 2449,
    2645, 2918, 3187, 2888, 2610, 2740, 2526, 2383, 2936, 2968, 2635, 2617,
    2790, 3906, 4018, 4797, 4919, 4942, 4656, 4444, 3898, 3908, 3678, 3605,
    3186, 2139, 2002, 1559, 1235, 1183, 1096, 673, 389, 223, 352, 308, 365,
    525, 779, 894, 901, 1025, 1047, 981, 902, 759, 569, 519, 408, 263, 156,
    72, 49, 31, 41, 192, 423, 492, 552, 564, 723, 921, 1525, 2768, 3531, 3824,
    3835, 4294, 4533, 4173, 4221, 4064, 4641, 4685, 4026, 4323, 4585, 4836,
    4822, 4631, 4614, 4326, 4790, 4736, 4104, 5099, 5154, 5121, 5384, 5274,
    5225, 4899, 5382, 5295, 5349, 4977, 4597, 4069, 3733, 3439, 3052, 2626,
    1939, 1064, 713, 916, 832, 658, 817, 921, 772, 764, 824, 967, 1127, 1153,
    824, 912, 957, 990, 1218, 1684, 2030, 2119, 2233, 2657, 2652, 2682, 2498,
    2429, 2346, 2298, 2129, 1829, 1816, 1225, 1010, 748, 627, 469, 576, 532,
    475, 582, 641, 605, 699, 680, 714, 670, 666, 636, 672, 679, 446, 248, 134,
    160, 178, 286, 413, 676, 1025, 1159, 952, 1398, 1833, 2045, 2072, 1798,
    1799, 1358, 727, 353, 347, 844, 1377, 1829, 2118, 2272, 2745, 4263, 4314,
    4530, 4354, 4645, 4547, 5391, 4855, 4739, 4520, 4573, 4305, 4196, 3773,
    3368, 2596, 2596, 2305, 2756, 3747, 4078, 3415, 2369, 2210, 2316, 2263,
    2672, 3571, 4131, 4167, 4077, 3924, 3738, 3712, 3510, 3182, 3179, 2951,
    2453, 2078, 1999, 2486, 2581, 1891, 1997, 1366, 1294, 1536, 2794, 3211,
    3242, 3406, 3121, 2425, 2016, 1787, 1508, 1304, 1060, 1342, 1589, 2361,
    3452, 2659, 2857, 3255, 3322, 2852, 2964, 3132, 3033, 2931, 2636, 2818,
    3310, 3396, 3179, 3232, 3543, 3759, 3503, 3758, 3658, 3425, 3053, 2620,
    1837, 923, 712, 1054, 1376, 1556, 1498, 1523, 1088, 728, 890, 1413, 2524,
    3295, 4097, 3993, 4116, 3874, 4074, 4142, 3975, 3908, 3907, 3918, 3755,
    3648, 3778, 4293, 4385, 4360, 4352, 4528, 4365, 3846, 4098, 3860, 3230,
    2820, 2916, 3201, 3721, 3397, 3055, 2141, 1623, 1825, 1716, 2232, 2939,
    3735, 4838, 4560, 4307, 4975, 5173, 4859, 5268, 4992, 5100, 5070, 5270,
    4760, 5135, 5059, 4682, 4492, 4933, 4737, 4611, 4634, 4789, 4811, 4379,
    4689, 4284, 4191, 3313, 2770, 2543, 3105, 2967, 2420, 1996, 2247, 2564,
    2726, 3021, 3427, 3509, 3759, 3324, 2988, 2849, 2340, 2443, 2364, 1252,
    623, 742, 867, 684, 488, 348, 241, 187, 279, 355, 423, 678, 1375, 1497,
    1434, 2116, 2411, 1929, 1628, 1635, 1609, 1757, 2090, 2085, 1790, 1846,
    2038, 2360, 2342, 2401, 2920, 3030, 3132, 4385, 5483, 5865, 5595, 5485,
    5727, 5553, 5560, 5233, 5478, 5159, 5155, 5312, 5079, 4510, 4628, 4535,
    3656, 3698, 3443, 3146, 2562, 2304, 2181, 2293, 1950, 1930, 2197, 2796,
    3441, 3649, 3815, 2850, 4005, 5305, 5550, 5641, 4717, 5131, 2831, 3518,
    3354, 3115, 3515, 3552, 3244, 3658, 4407, 4935, 4299, 3166, 3335, 2728,
    2488, 2573, 2002, 1717, 1645, 1977, 2049, 2125, 2376, 2551, 2578, 2629,
    2750, 3150, 3699, 4062, 3959, 3264, 2671, 2205, 2128, 2133, 2095, 1964,
    2006, 2074, 2201, 2506, 2449, 2465, 2064, 1446, 1382, 983, 898, 489, 319,
    383, 332, 276, 224, 144, 101, 232, 429, 597, 750, 908, 960, 1076, 951,
    1062, 1183, 1404, 1391, 1419, 1497, 1267, 963, 682, 777, 906, 1149, 1439,
    1600, 1876, 1885, 1962, 2280, 2711, 2591, 2411])
    # The fit may emit convergence warnings; only a hard failure matters here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = ARMA(data, order=(4,1)).fit(disp=-1)
class Test_ARIMA101(CheckArmaResultsMixin):
    """ARIMA(1,0,1) with constant should reproduce the ARMA(1,1)c results."""
    # just make sure this works
    @classmethod
    def setupClass(cls):
        endog = y_arma[:,6]
        cls.res1 = ARIMA(endog, (1,0,1)).fit(trend="c", disp=-1)
        (cls.res1.forecast_res, cls.res1.forecast_err,
         confint) = cls.res1.forecast(10)
        cls.res2 = results_arma.Y_arma11c()
        # Reuse the ARMA reference results, annotated with the ARIMA order.
        cls.res2.k_diff = 0
        cls.res2.k_ar = 1
        cls.res2.k_ma = 1
class Test_ARIMA111(CheckArimaResultsMixin, CheckForecastMixin,
                    CheckDynamicForecastMixin):
    """ARIMA(1,1,1) on quarterly CPI, exact MLE, with forecast checks."""
    @classmethod
    def setupClass(cls):
        cpi = load_macrodata().data['cpi']
        cls.res1 = ARIMA(cpi, (1,1,1)).fit(disp=-1)
        cls.res2 = results_arima.ARIMA111()
        # make sure endog names changes to D.cpi
        cls.decimal_llf = 3
        cls.decimal_aic = 3
        cls.decimal_bic = 3
        cls.decimal_cov_params = 2 # this used to be better?
        cls.decimal_t = 0
        (cls.res1.forecast_res,
         cls.res1.forecast_err,
         conf_int) = cls.res1.forecast(25)
        #cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226, typ='levels', dynamic=True)
        #TODO: fix the indexing for the end here, I don't think this is right
        # if we're going to treat it like indexing
        # the forecast from 2005Q1 through 2009Q4 is indices
        # 184 through 227 not 226
        # note that the first one counts in the count so 164 + 64 is 65
        # predictions
        cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164+63,
                                                     typ='levels', dynamic=True)

    def test_freq(self):
        # AR and MA root frequencies should both be zero for this fit.
        assert_almost_equal(self.res1.arfreq, [0.0000], 4)
        assert_almost_equal(self.res1.mafreq, [0.0000], 4)
class Test_ARIMA111CSS(CheckArimaResultsMixin, CheckForecastMixin,
                       CheckDynamicForecastMixin):
    """ARIMA(1,1,1) on quarterly CPI, conditional sum-of-squares."""
    @classmethod
    def setupClass(cls):
        cpi = load_macrodata().data['cpi']
        cls.res1 = ARIMA(cpi, (1,1,1)).fit(disp=-1, method='css')
        cls.res2 = results_arima.ARIMA111(method='css')
        # Reference stores level predictions; convert to fitted differences.
        cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
        # make sure endog names changes to D.cpi
        (cls.res1.forecast_res,
         cls.res1.forecast_err,
         conf_int) = cls.res1.forecast(25)
        cls.decimal_forecast = 2
        cls.decimal_forecast_dyn = 2
        cls.decimal_forecasterr = 3
        cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164+63,
                                                     typ='levels', dynamic=True)

        # precisions
        cls.decimal_arroots = 3
        cls.decimal_cov_params = 3
        cls.decimal_hqic = 3
        cls.decimal_maroots = 3
        cls.decimal_t = 1
        cls.decimal_fittedvalues = 2 # because of rounding when copying
        cls.decimal_resid = 2
        #cls.decimal_llf = 3
        #cls.decimal_aic = 3
        #cls.decimal_bic = 3
        cls.decimal_predict_levels = DECIMAL_2
class Test_ARIMA112CSS(CheckArimaResultsMixin):
    """ARIMA(1,1,2) on quarterly CPI, CSS, with explicit start parameters."""
    @classmethod
    def setupClass(cls):
        cpi = load_macrodata().data['cpi']
        cls.res1 = ARIMA(cpi, (1,1,2)).fit(disp=-1, method='css',
                                           start_params = [.905322, -.692425, 1.07366,
                                                           0.172024])
        cls.res2 = results_arima.ARIMA112(method='css')
        # Reference stores level predictions; convert to fitted differences.
        cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
        # make sure endog names changes to D.cpi
        cls.decimal_llf = 3
        cls.decimal_aic = 3
        cls.decimal_bic = 3
        #(cls.res1.forecast_res,
        # cls.res1.forecast_err,
        # conf_int) = cls.res1.forecast(25)
        #cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226, typ='levels', dynamic=True)
        #TODO: fix the indexing for the end here, I don't think this is right
        # if we're going to treat it like indexing
        # the forecast from 2005Q1 through 2009Q4 is indices
        # 184 through 227 not 226
        # note that the first one counts in the count so 164 + 64 is 65
        # predictions
        #cls.res1.forecast_res_dyn = self.predict(start=164, end=164+63,
        #                                         typ='levels', dynamic=True)
        # since we got from gretl don't have linear prediction in differences
        cls.decimal_arroots = 3
        cls.decimal_maroots = 2
        cls.decimal_t = 1
        cls.decimal_resid = 2
        cls.decimal_fittedvalues = 3
        cls.decimal_predict_levels = DECIMAL_3

    def test_freq(self):
        # Roots of this fit sit at the Nyquist frequency (0.5 cycles/period).
        assert_almost_equal(self.res1.arfreq, [0.5000], 4)
        assert_almost_equal(self.res1.mafreq, [0.5000, 0.5000], 4)
#class Test_ARIMADates(CheckArmaResults, CheckForecast, CheckDynamicForecast):
# @classmethod
# def setupClass(cls):
# from statsmodels.tsa.datetools import dates_from_range
#
# cpi = load_macrodata().data['cpi']
# dates = dates_from_range('1959q1', length=203)
# cls.res1 = ARIMA(cpi, dates=dates, freq='Q').fit(order=(1,1,1), disp=-1)
# cls.res2 = results_arima.ARIMA111()
# # make sure endog names changes to D.cpi
# cls.decimal_llf = 3
# cls.decimal_aic = 3
# cls.decimal_bic = 3
# (cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
def test_arima_predict_mle_dates():
    """Date-string based predict (typ='levels') for an MLE ARIMA(4,1,1):
    values must match the stored gretl forecasts and data.predict_dates
    must line up with the integer index range implied by the dates."""
    cpi = load_macrodata().data['cpi']
    res1 = ARIMA(cpi, (4,1,1), dates=cpi_dates, freq='Q').fit(disp=-1)

    arima_forecasts = np.genfromtxt(open(
        current_path + '/results/results_arima_forecasts_all_mle.csv', "rb"),
        delimiter=",", skip_header=1, dtype=float)
    # column 0: static forecasts; 1 and 2: dynamic forecasts started at
    # indices 5 and 24 respectively
    fc = arima_forecasts[:,0]
    fcdyn = arima_forecasts[:,1]
    fcdyn2 = arima_forecasts[:,2]

    # in-sample span 1959Q3 - 1971Q4 -> indices 2..51
    start, end = 2, 51
    fv = res1.predict('1959Q3', '1971Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    assert_equal(res1.data.predict_dates, cpi_dates[start:end+1])

    # span running past the sample end -> indices 202..227
    start, end = 202, 227
    fv = res1.predict('2009Q3', '2015Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    assert_equal(res1.data.predict_dates, cpi_predict_dates)

    # make sure dynamic works
    start, end = '1960q2', '1971q4'
    fv = res1.predict(start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[5:51+1], DECIMAL_4)

    start, end = '1965q1', '2015q4'
    fv = res1.predict(start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[24:227+1], DECIMAL_4)
def test_arma_predict_mle_dates():
    """Date-string start/end handling for an MLE ARMA(9,0) on sunspots:
    dynamic prediction cannot begin in the pre-sample, and
    data.predict_dates must match the implied integer index range."""
    from statsmodels.datasets.sunspots import load
    endog = load().data['SUNACTIVITY']
    model = ARMA(endog, (9,0), dates=sun_dates, freq='A')
    model.method = 'mle'

    # '1701' lies inside the 9-lag pre-sample, so dynamic=True must raise
    assert_raises(ValueError, model._get_predict_start, '1701', True)

    # in-sample span 1702-1751 -> indices 2..51
    model._get_predict_start('1702', False)
    model._get_predict_end('1751')
    assert_equal(model.data.predict_dates, sun_dates[2:52])

    # span running past the sample end -> indices 308..333
    model._get_predict_start('2008', False)
    model._get_predict_end('2033')
    assert_equal(model.data.predict_dates, sun_predict_dates)
def test_arima_predict_css_dates():
    """Date-string based model.predict for a CSS ARIMA(4,1,1) using
    gretl's parameter estimates, checked against gretl forecasts."""
    cpi = load_macrodata().data['cpi']
    res1 = ARIMA(cpi, (4,1,1), dates=cpi_dates, freq='Q').fit(disp=-1,
            method='css', trend='nc')
    # gretl's parameter estimates, used directly to avoid precision
    # differences between optimizers
    params = np.array([ 1.231272508473910,
                       -0.282516097759915,
                        0.170052755782440,
                       -0.118203728504945,
                       -0.938783134717947])

    arima_forecasts = np.genfromtxt(open(
        current_path + '/results/results_arima_forecasts_all_css.csv', "rb"),
        delimiter=",", skip_header=1, dtype=float)
    fc = arima_forecasts[:,0]
    fcdyn = arima_forecasts[:,1]
    fcdyn2 = arima_forecasts[:,2]

    # in-sample span 1960Q2 - 1971Q4 -> indices 5..51 (css loses the
    # first p + d observations, hence the start at 5)
    start, end = 5, 51
    fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    assert_equal(res1.data.predict_dates, cpi_dates[start:end+1])

    start, end = 202, 227
    fv = res1.model.predict(params, '2009Q3', '2015Q4', typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    assert_equal(res1.data.predict_dates, cpi_predict_dates)

    # make sure dynamic works
    start, end = 5, 51
    fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels',
            dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)

    start, end = '1965q1', '2015q4'
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[24:227+1], DECIMAL_4)
def test_arma_predict_css_dates():
    """A CSS ARMA drops the pre-sample values, so a start date inside
    the pre-sample ('1701') must raise even with dynamic=False."""
    from statsmodels.datasets.sunspots import load
    endog = load().data['SUNACTIVITY']
    model = ARMA(endog, (9,0), dates=sun_dates, freq='A')
    model.method = 'css'
    assert_raises(ValueError, model._get_predict_start, '1701', False)
def test_arima_predict_mle():
    """In-/out-of-sample predict (typ='levels') for an MLE ARIMA(4,1,1)
    on cpi, checked against forecasts generated by gretl.

    Index convention: 0 is the first in-sample observation of the
    differenced series, so everything is shifted back one relative to
    cpi in levels; nobs of the differenced series is 202.
    """
    cpi = load_macrodata().data['cpi']
    # fit only to set up the model with the correct endog length; the
    # expected values come from the stored gretl forecasts below
    res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1)
    arima_forecasts = np.genfromtxt(open(
        current_path + '/results/results_arima_forecasts_all_mle.csv', "rb"),
        delimiter=",", skip_header=1, dtype=float)
    # column 0: static forecasts; columns 1-4: dynamic forecasts started
    # at indices 5, 24, 202 and 203 respectively
    fc = arima_forecasts[:, 0]
    fcdyn = arima_forecasts[:, 1]
    fcdyn2 = arima_forecasts[:, 2]
    fcdyn3 = arima_forecasts[:, 3]
    fcdyn4 = arima_forecasts[:, 4]

    # Static predictions: (start, end, decimal)
    static_cases = [
        (1, 3, DECIMAL_4),      # start < p, end < p     1959q2 - 1959q4
        (2, 4, DECIMAL_4),      # start < p, end 0       1959q3 - 1960q1
        (2, 51, DECIMAL_4),     # start < p, end > 0     1959q3 - 1971q4
        (2, 202, DECIMAL_4),    # start < p, end nobs    1959q3 - 2009q3
        (2, 227, DECIMAL_4),    # start < p, end > nobs  1959q3 - 2015q4
        (4, 51, DECIMAL_4),     # start 0,   end > 0     1960q1 - 1971q4
        (4, 202, DECIMAL_4),    # start 0,   end nobs    1960q1 - 2009q3
        (4, 227, DECIMAL_4),    # start 0,   end > nobs  1960q1 - 2015q4
        (24, 51, DECIMAL_4),    # start > p, end > 0     1965q1 - 1971q4
        (24, 202, DECIMAL_4),   # start > p, end nobs    1965q1 - 2009q3
        (24, 227, DECIMAL_4),   # start > p, end > nobs  1965q1 - 2015q4
        # (202, 202) raises and is deliberately not exercised
        (202, 227, DECIMAL_3),  # start nobs, end > nobs 2009q3 - 2015q4
        (203, 227, DECIMAL_4),  # start/end > nobs       2009q4 - 2015q4
    ]
    for start, end, decimal in static_cases:
        fv = res1.predict(start, end, typ='levels')
        assert_almost_equal(fv, fc[start:end + 1], decimal)
    # defaults
    fv = res1.predict(None, None, typ='levels')
    assert_almost_equal(fv, fc[1:203], DECIMAL_4)

    #### Dynamic ####
    # NOTE: dynamic prediction with start < p should raise and is not
    # exercised here.  Cases: (start, end, expected forecast column).
    dynamic_cases = [
        (5, 51, fcdyn),      # start 0,    end > 0     1960q1 - 1971q4
        (5, 202, fcdyn),     # start 0,    end nobs    1960q1 - 2009q3
        (5, 227, fcdyn),     # start 0,    end > nobs  1960q1 - 2015q4
        (24, 51, fcdyn2),    # start > p,  end > 0     1965q1 - 1971q4
        (24, 202, fcdyn2),   # start > p,  end nobs    1965q1 - 2009q3
        (24, 227, fcdyn2),   # start > p,  end > nobs  1965q1 - 2015q4
        (202, 202, fcdyn3),  # start nobs, end nobs    2009q3 - 2009q3
        (202, 227, fcdyn3),  # start nobs, end > nobs  2009q3 - 2015q4
        (203, 227, fcdyn4),  # start/end > nobs        2009q4 - 2015q4
    ]
    for start, end, expected in dynamic_cases:
        fv = res1.predict(start, end, dynamic=True, typ='levels')
        assert_almost_equal(fv, expected[start:end + 1], DECIMAL_4)
    # defaults
    fv = res1.predict(None, None, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def _check_start(model, given, expected, dynamic):
    """Assert _get_predict_start maps `given` (int or date string) to
    the expected integer start index."""
    assert_equal(model._get_predict_start(given, dynamic), expected)
def _check_end(model, given, end_expect, out_of_sample_expect):
    """Assert _get_predict_end returns the expected
    (end index, out-of-sample count) pair for `given`."""
    result = model._get_predict_end(given)
    assert_equal(result, (end_expect, out_of_sample_expect))
def test_arma_predict_indices():
    """_get_predict_start/_get_predict_end for an MLE ARMA(9,0):
    dynamic starts inside the 9-lag pre-sample raise, out-of-sample
    starts raise, and valid int/date inputs map to expected indices."""
    from statsmodels.datasets.sunspots import load
    sunspots = load().data['SUNACTIVITY']
    model = ARMA(sunspots, (9,0), dates=sun_dates, freq='A')
    model.method = 'mle'

    # raises - pre-sample + dynamic
    assert_raises(ValueError, model._get_predict_start, *(0, True))
    assert_raises(ValueError, model._get_predict_start, *(8, True))
    assert_raises(ValueError, model._get_predict_start, *('1700', True))
    assert_raises(ValueError, model._get_predict_start, *('1708', True))

    # raises - start out of sample
    assert_raises(ValueError, model._get_predict_start, *(311, True))
    assert_raises(ValueError, model._get_predict_start, *(311, False))
    assert_raises(ValueError, model._get_predict_start, *('2010', True))
    assert_raises(ValueError, model._get_predict_start, *('2010', False))

    # works - in-sample
    # None
    # given, expected, dynamic
    start_test_cases = [
                  (None, 9, True),
                  # all start get moved back by k_diff
                  (9, 9, True),
                  (10, 10, True),
                  # what about end of sample start - last value is first
                  # forecast
                  (309, 309, True),
                  (308, 308, True),
                  (0, 0, False),
                  (1, 1, False),
                  (4, 4, False),
                  # all start get moved back by k_diff
                  ('1709', 9, True),
                  ('1710', 10, True),
                  # what about end of sample start - last value is first
                  # forecast
                  ('2008', 308, True),
                  ('2009', 309, True),
                  ('1700', 0, False),
                  ('1708', 8, False),
                  ('1709', 9, False),
                  ]
    for case in start_test_cases:
        _check_start(*((model,) + case))

    # the length of sunspot is 309, so last index is 308
    # each case: (given, expected end index, expected out-of-sample count)
    end_test_cases = [(None, 308, 0),
                      (307, 307, 0),
                      (308, 308, 0),
                      (309, 308, 1),
                      (312, 308, 4),
                      (51, 51, 0),
                      (333, 308, 25),

                      ('2007', 307, 0),
                      ('2008', 308, 0),
                      ('2009', 308, 1),
                      ('2012', 308, 4),
                      ('1815', 115, 0),
                      ('2033', 308, 25),
                      ]
    for case in end_test_cases:
        _check_end(*((model,)+case))
def test_arima_predict_indices():
    """_get_predict_start/_get_predict_end for an MLE ARIMA(4,1,1):
    starts in the differenced-away pre-sample raise, and returned
    indices are on the differenced scale (shifted back by k_diff)."""
    cpi = load_macrodata().data['cpi']
    model = ARIMA(cpi, (4,1,1), dates=cpi_dates, freq='Q')
    model.method = 'mle'

    # starting indices
    # raises - pre-sample + dynamic
    assert_raises(ValueError, model._get_predict_start, *(0, True))
    assert_raises(ValueError, model._get_predict_start, *(4, True))
    assert_raises(ValueError, model._get_predict_start, *('1959Q1', True))
    assert_raises(ValueError, model._get_predict_start, *('1960Q1', True))

    # raises - index differenced away
    assert_raises(ValueError, model._get_predict_start, *(0, False))
    assert_raises(ValueError, model._get_predict_start, *('1959Q1', False))

    # raises - start out of sample
    assert_raises(ValueError, model._get_predict_start, *(204, True))
    assert_raises(ValueError, model._get_predict_start, *(204, False))
    assert_raises(ValueError, model._get_predict_start, *('2010Q1', True))
    assert_raises(ValueError, model._get_predict_start, *('2010Q1', False))

    # works - in-sample
    # None
    # given, expected, dynamic
    start_test_cases = [
                  (None, 4, True),
                  # all start get moved back by k_diff
                  (5, 4, True),
                  (6, 5, True),
                  # what about end of sample start - last value is first
                  # forecast
                  (203, 202, True),
                  (1, 0, False),
                  (4, 3, False),
                  (5, 4, False),
                  # all start get moved back by k_diff
                  ('1960Q2', 4, True),
                  ('1960Q3', 5, True),
                  # what about end of sample start - last value is first
                  # forecast
                  ('2009Q4', 202, True),
                  ('1959Q2', 0, False),
                  ('1960Q1', 3, False),
                  ('1960Q2', 4, False),
                  ]
    for case in start_test_cases:
        _check_start(*((model,) + case))

    # check raises
    #TODO: make sure dates are passing through unmolested
    #assert_raises(ValueError, model._get_predict_end, ("2001-1-1",))

    # the length of diff(cpi) is 202, so last index is 201
    # each case: (given, expected end index, expected out-of-sample count)
    end_test_cases = [(None, 201, 0),
                      (201, 200, 0),
                      (202, 201, 0),
                      (203, 201, 1),
                      (204, 201, 2),
                      (51, 50, 0),
                      (164+63, 201, 25),

                      ('2009Q2', 200, 0),
                      ('2009Q3', 201, 0),
                      ('2009Q4', 201, 1),
                      ('2010Q1', 201, 2),
                      ('1971Q4', 50, 0),
                      ('2015Q4', 201, 25),
                      ]
    for case in end_test_cases:
        _check_end(*((model,)+case))

    # check higher k_diff: re-run the index machinery pretending d=2
    model.k_diff = 2

    # raises - pre-sample + dynamic
    assert_raises(ValueError, model._get_predict_start, *(0, True))
    assert_raises(ValueError, model._get_predict_start, *(5, True))
    assert_raises(ValueError, model._get_predict_start, *('1959Q1', True))
    assert_raises(ValueError, model._get_predict_start, *('1960Q1', True))
    # raises - index differenced away
    assert_raises(ValueError, model._get_predict_start, *(1, False))
    assert_raises(ValueError, model._get_predict_start, *('1959Q2', False))

    start_test_cases = [(None, 4, True),
                  # all start get moved back by k_diff
                  (6, 4, True),
                  # what about end of sample start - last value is first
                  # forecast
                  (203, 201, True),
                  (2, 0, False),
                  (4, 2, False),
                  (5, 3, False),
                  ('1960Q3', 4, True),
                  # what about end of sample start - last value is first
                  # forecast
                  ('2009Q4', 201, True),
                  # NOTE(review): the next case duplicates the previous one
                  ('2009Q4', 201, True),
                  ('1959Q3', 0, False),
                  ('1960Q1', 2, False),
                  ('1960Q2', 3, False),
                  ]
    for case in start_test_cases:
        _check_start(*((model,)+case))

    end_test_cases = [(None, 200, 0),
                      (201, 199, 0),
                      (202, 200, 0),
                      (203, 200, 1),
                      (204, 200, 2),
                      (51, 49, 0),
                      (164+63, 200, 25),

                      ('2009Q2', 199, 0),
                      ('2009Q3', 200, 0),
                      ('2009Q4', 200, 1),
                      ('2010Q1', 200, 2),
                      ('1971Q4', 49, 0),
                      ('2015Q4', 200, 25),
                      ]
    for case in end_test_cases:
        _check_end(*((model,)+case))
def test_arima_predict_indices_css():
    """With method='css' the pre-sample observations are dropped, so
    starts that fall inside the dropped span must raise, with or
    without dynamic prediction."""
    cpi = load_macrodata().data['cpi']
    #NOTE: Doing no-constant for now to kick the conditional exogenous
    #issue 274 down the road
    model = ARIMA(cpi, (4,1,1))
    model.method = 'css'
    for bad_start in (0, 2):
        for dynamic in (False, True):
            assert_raises(ValueError, model._get_predict_start,
                          bad_start, dynamic)
def test_arima_predict_css():
    """model.predict (typ='levels') for a CSS ARIMA(4,1,1) on cpi over
    every start/end regime, using gretl's parameter estimates and
    checking against the stored gretl forecasts."""
    cpi = load_macrodata().data['cpi']
    #NOTE: Doing no-constant for now to kick the conditional exogenous
    #issue 274 down the road
    # go ahead and git the model to set up necessary variables
    res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1, method="css",
            trend="nc")
    # but use gretl parameters to predict to avoid precision problems
    params = np.array([ 1.231272508473910,
                       -0.282516097759915,
                        0.170052755782440,
                       -0.118203728504945,
                       -0.938783134717947])

    arima_forecasts = np.genfromtxt(open(
        current_path + '/results/results_arima_forecasts_all_css.csv', "rb"),
        delimiter=",", skip_header=1, dtype=float)
    # column 0: static forecasts; columns 1-4: dynamic forecasts started
    # at indices 5, 24, 202 and 203 respectively
    fc = arima_forecasts[:,0]
    fcdyn = arima_forecasts[:,1]
    fcdyn2 = arima_forecasts[:,2]
    fcdyn3 = arima_forecasts[:,3]
    fcdyn4 = arima_forecasts[:,4]

    #NOTE: should raise
    #start, end = 1,3
    #fv = res1.model.predict(params, start, end)
    ## start < p, end 0 1959q3 - 1960q1
    #start, end = 2, 4
    #fv = res1.model.predict(params, start, end)
    ## start < p, end >0 1959q3 - 1971q4
    #start, end = 2, 51
    #fv = res1.model.predict(params, start, end)
    ## start < p, end nobs 1959q3 - 2009q3
    #start, end = 2, 202
    #fv = res1.model.predict(params, start, end)
    ## start < p, end >nobs 1959q3 - 2015q4
    #start, end = 2, 227
    #fv = res1.model.predict(params, start, end)
    # start 0, end >0 1960q1 - 1971q4
    start, end = 5, 51
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start 0, end nobs 1960q1 - 2009q3
    start, end = 5, 202
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start 0, end >nobs 1960q1 - 2015q4
    #TODO: why detoriating precision?
    # NOTE(review): start/end are not reassigned here, so this repeats
    # the (5, 202) case; (5, 227) was presumably intended — confirm
    # against the precision TODO above before changing
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end >0 1965q1 - 1971q4
    start, end = 24, 51
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end nobs 1965q1 - 2009q3
    start, end = 24, 202
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end >nobs 1965q1 - 2015q4
    start, end = 24, 227
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start nobs, end nobs 2009q3 - 2009q3
    start, end = 202, 202
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start nobs, end >nobs 2009q3 - 2015q4
    start, end = 202, 227
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >nobs, end >nobs 2009q4 - 2015q4
    start, end = 203, 227
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # defaults
    start, end = None, None
    fv = res1.model.predict(params, start, end, typ='levels')
    assert_almost_equal(fv, fc[5:203], DECIMAL_4)

    #### Dynamic #####
    #NOTE: should raise
    # start < p, end <p 1959q2 - 1959q4
    #start, end = 1,3
    #fv = res1.predict(start, end, dynamic=True)
    # start < p, end 0 1959q3 - 1960q1
    #start, end = 2, 4
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end >0 1959q3 - 1971q4
    #start, end = 2, 51
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end nobs 1959q3 - 2009q3
    #start, end = 2, 202
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end >nobs 1959q3 - 2015q4
    #start, end = 2, 227
    #fv = res1.predict(start, end, dynamic=True)
    # start 0, end >0 1960q1 - 1971q4
    start, end = 5, 51
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start 0, end nobs 1960q1 - 2009q3
    start, end = 5, 202
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start 0, end >nobs 1960q1 - 2015q4
    start, end = 5, 227
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start >p, end >0 1965q1 - 1971q4
    start, end = 24, 51
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start >p, end nobs 1965q1 - 2009q3
    start, end = 24, 202
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start >p, end >nobs 1965q1 - 2015q4
    start, end = 24, 227
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start nobs, end nobs 2009q3 - 2009q3
    start, end = 202, 202
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
    # start nobs, end >nobs 2009q3 - 2015q4
    start, end = 202, 227
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    # NOTE(review): no assertion here — fv is computed but never checked;
    # fcdyn3[start:end+1] is presumably the intended comparison
    # start >nobs, end >nobs 2009q4 - 2015q4
    start, end = 203, 227
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
    # defaults
    start, end = None, None
    fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
    assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def test_arima_predict_css_diffs():
    """Same regimes as test_arima_predict_css but with trend='c' and
    predictions left in differences (no typ='levels'), checked against
    gretl forecasts of the differenced series."""
    cpi = load_macrodata().data['cpi']
    #NOTE: Doing no-constant for now to kick the conditional exogenous
    #issue 274 down the road
    # go ahead and git the model to set up necessary variables
    res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1, method="css",
            trend="c")
    # but use gretl parameters to predict to avoid precision problems
    params = np.array([0.78349893861244,
                      -0.533444105973324,
                       0.321103691668809,
                       0.264012463189186,
                       0.107888256920655,
                       0.920132542916995])
    # we report mean, should we report constant?
    # (convert gretl's constant to the mean ARIMA reports)
    params[0] = params[0] / (1 - params[1:5].sum())

    arima_forecasts = np.genfromtxt(open(
        current_path + '/results/results_arima_forecasts_all_css_diff.csv',
        "rb"),
        delimiter=",", skip_header=1, dtype=float)
    # column 0: static forecasts; columns 1-4: dynamic forecasts started
    # at indices 5, 24, 202 and 203 respectively
    fc = arima_forecasts[:,0]
    fcdyn = arima_forecasts[:,1]
    fcdyn2 = arima_forecasts[:,2]
    fcdyn3 = arima_forecasts[:,3]
    fcdyn4 = arima_forecasts[:,4]

    #NOTE: should raise
    #start, end = 1,3
    #fv = res1.model.predict(params, start, end)
    ## start < p, end 0 1959q3 - 1960q1
    #start, end = 2, 4
    #fv = res1.model.predict(params, start, end)
    ## start < p, end >0 1959q3 - 1971q4
    #start, end = 2, 51
    #fv = res1.model.predict(params, start, end)
    ## start < p, end nobs 1959q3 - 2009q3
    #start, end = 2, 202
    #fv = res1.model.predict(params, start, end)
    ## start < p, end >nobs 1959q3 - 2015q4
    #start, end = 2, 227
    #fv = res1.model.predict(params, start, end)
    # start 0, end >0 1960q1 - 1971q4
    start, end = 5, 51
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start 0, end nobs 1960q1 - 2009q3
    start, end = 5, 202
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start 0, end >nobs 1960q1 - 2015q4
    #TODO: why detoriating precision?
    # NOTE(review): start/end are not reassigned here, so this repeats
    # the (5, 202) case; (5, 227) was presumably intended — confirm
    # against the precision TODO above before changing
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end >0 1965q1 - 1971q4
    start, end = 24, 51
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end nobs 1965q1 - 2009q3
    start, end = 24, 202
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end >nobs 1965q1 - 2015q4
    start, end = 24, 227
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start nobs, end nobs 2009q3 - 2009q3
    start, end = 202, 202
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start nobs, end >nobs 2009q3 - 2015q4
    start, end = 202, 227
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >nobs, end >nobs 2009q4 - 2015q4
    start, end = 203, 227
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # defaults
    start, end = None, None
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[5:203], DECIMAL_4)

    #### Dynamic #####
    #NOTE: should raise
    # start < p, end <p 1959q2 - 1959q4
    #start, end = 1,3
    #fv = res1.predict(start, end, dynamic=True)
    # start < p, end 0 1959q3 - 1960q1
    #start, end = 2, 4
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end >0 1959q3 - 1971q4
    #start, end = 2, 51
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end nobs 1959q3 - 2009q3
    #start, end = 2, 202
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end >nobs 1959q3 - 2015q4
    #start, end = 2, 227
    #fv = res1.predict(start, end, dynamic=True)
    # start 0, end >0 1960q1 - 1971q4
    start, end = 5, 51
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start 0, end nobs 1960q1 - 2009q3
    start, end = 5, 202
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start 0, end >nobs 1960q1 - 2015q4
    start, end = 5, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start >p, end >0 1965q1 - 1971q4
    start, end = 24, 51
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start >p, end nobs 1965q1 - 2009q3
    start, end = 24, 202
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start >p, end >nobs 1965q1 - 2015q4
    start, end = 24, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start nobs, end nobs 2009q3 - 2009q3
    start, end = 202, 202
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
    # start nobs, end >nobs 2009q3 - 2015q4
    start, end = 202, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    # NOTE(review): no assertion here — fv is computed but never checked;
    # fcdyn3[start:end+1] is presumably the intended comparison
    # start >nobs, end >nobs 2009q4 - 2015q4
    start, end = 203, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
    # defaults
    start, end = None, None
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def test_arima_predict_mle_diffs():
    """MLE counterpart of test_arima_predict_css_diffs: predictions in
    differences for ARIMA(4,1,1) with trend='c', using gretl params and
    checked against the stored gretl forecasts."""
    cpi = load_macrodata().data['cpi']
    #NOTE: Doing no-constant for now to kick the conditional exogenous
    #issue 274 down the road
    # go ahead and git the model to set up necessary variables
    res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1, trend="c")
    # but use gretl parameters to predict to avoid precision problems
    params = np.array([0.926875951549299,
                      -0.555862621524846,
                       0.320865492764400,
                       0.252253019082800,
                       0.113624958031799,
                       0.939144026934634])

    arima_forecasts = np.genfromtxt(open(
        current_path + '/results/results_arima_forecasts_all_mle_diff.csv',
        "rb"),
        delimiter=",", skip_header=1, dtype=float)
    # column 0: static forecasts; columns 1-4: dynamic forecasts started
    # at indices 5, 24, 202 and 203 respectively
    fc = arima_forecasts[:,0]
    fcdyn = arima_forecasts[:,1]
    fcdyn2 = arima_forecasts[:,2]
    fcdyn3 = arima_forecasts[:,3]
    fcdyn4 = arima_forecasts[:,4]

    #NOTE: should raise
    # NOTE(review): unlike the css variant these pre-sample cases are
    # executed (not commented out) and their results are never checked;
    # they act only as smoke calls
    start, end = 1,3
    fv = res1.model.predict(params, start, end)
    ## start < p, end 0 1959q3 - 1960q1
    start, end = 2, 4
    fv = res1.model.predict(params, start, end)
    ## start < p, end >0 1959q3 - 1971q4
    start, end = 2, 51
    fv = res1.model.predict(params, start, end)
    ## start < p, end nobs 1959q3 - 2009q3
    start, end = 2, 202
    fv = res1.model.predict(params, start, end)
    ## start < p, end >nobs 1959q3 - 2015q4
    start, end = 2, 227
    fv = res1.model.predict(params, start, end)
    # start 0, end >0 1960q1 - 1971q4
    start, end = 5, 51
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start 0, end nobs 1960q1 - 2009q3
    start, end = 5, 202
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start 0, end >nobs 1960q1 - 2015q4
    #TODO: why detoriating precision?
    # NOTE(review): start/end are not reassigned here, so this repeats
    # the (5, 202) case; (5, 227) was presumably intended — confirm
    # against the precision TODO above before changing
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end >0 1965q1 - 1971q4
    start, end = 24, 51
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end nobs 1965q1 - 2009q3
    start, end = 24, 202
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >p, end >nobs 1965q1 - 2015q4
    start, end = 24, 227
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start nobs, end nobs 2009q3 - 2009q3
    start, end = 202, 202
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start nobs, end >nobs 2009q3 - 2015q4
    start, end = 202, 227
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # start >nobs, end >nobs 2009q4 - 2015q4
    start, end = 203, 227
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
    # defaults: MLE predictions start at index 1, unlike css
    start, end = None, None
    fv = res1.model.predict(params, start, end)
    assert_almost_equal(fv, fc[1:203], DECIMAL_4)

    #### Dynamic #####
    #NOTE: should raise
    # start < p, end <p 1959q2 - 1959q4
    #start, end = 1,3
    #fv = res1.predict(start, end, dynamic=True)
    # start < p, end 0 1959q3 - 1960q1
    #start, end = 2, 4
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end >0 1959q3 - 1971q4
    #start, end = 2, 51
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end nobs 1959q3 - 2009q3
    #start, end = 2, 202
    #fv = res1.predict(start, end, dynamic=True)
    ## start < p, end >nobs 1959q3 - 2015q4
    #start, end = 2, 227
    #fv = res1.predict(start, end, dynamic=True)
    # start 0, end >0 1960q1 - 1971q4
    start, end = 5, 51
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start 0, end nobs 1960q1 - 2009q3
    start, end = 5, 202
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start 0, end >nobs 1960q1 - 2015q4
    start, end = 5, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
    # start >p, end >0 1965q1 - 1971q4
    start, end = 24, 51
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start >p, end nobs 1965q1 - 2009q3
    start, end = 24, 202
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start >p, end >nobs 1965q1 - 2015q4
    start, end = 24, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
    # start nobs, end nobs 2009q3 - 2009q3
    start, end = 202, 202
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
    # start nobs, end >nobs 2009q3 - 2015q4
    start, end = 202, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    # NOTE(review): no assertion here — fv is computed but never checked;
    # fcdyn3[start:end+1] is presumably the intended comparison
    # start >nobs, end >nobs 2009q4 - 2015q4
    start, end = 203, 227
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
    # defaults
    start, end = None, None
    fv = res1.model.predict(params, start, end, dynamic=True)
    assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def test_arima_wrapper():
    """Fitting ARIMA on a pandas Series must rename the endog to
    'D.cpi' (first difference) and label the params accordingly."""
    cpi = load_macrodata_pandas().data['cpi']
    cpi.index = pandas.Index(cpi_dates)
    res = ARIMA(cpi, (4,1,1), freq='Q').fit(disp=-1)
    expected_names = ['const', 'ar.L1.D.cpi', 'ar.L2.D.cpi',
                      'ar.L3.D.cpi', 'ar.L4.D.cpi', 'ma.L1.D.cpi']
    assert_equal(res.params.index, pandas.Index(expected_names))
    assert_equal(res.model.endog_names, 'D.cpi')
def test_1dexog():
    """Smoke test: ARMA must accept a 1-dim exog (ndarray or Series)
    for fitting and for (dynamic) out-of-sample prediction.
    This will raise an error if broken."""
    dta = load_macrodata_pandas().data
    endog = dta['realcons'].values
    exog = dta['m1'].values.squeeze()
    with warnings.catch_warnings():
        # silence warnings emitted during fitting (presumably
        # convergence-related) — only exceptions matter here
        warnings.simplefilter("ignore")
        mod = ARMA(endog, (1,1), exog).fit(disp=-1)
        mod.predict(193, 203, exog[-10:])

        # check for dynamic is true and pandas Series see #2589
        mod.predict(193, 202, exog[-10:], dynamic=True)

        dta.index = pandas.Index(cpi_dates)
        mod = ARMA(dta['realcons'], (1,1), dta['m1']).fit(disp=-1)
        mod.predict(dta.index[-10], dta.index[-1], exog=dta['m1'][-10:], dynamic=True)

        mod = ARMA(dta['realcons'], (1,1), dta['m1']).fit(trend='nc', disp=-1)
        mod.predict(dta.index[-10], dta.index[-1], exog=dta['m1'][-10:], dynamic=True)
def test_arima_predict_bug():
    """Regression test: predict_start_date wasn't getting set when
    start=None.  Also checks timestamp-based predict, see GH#2587."""
    from statsmodels.datasets import sunspots
    dta = sunspots.load_pandas().data.SUNACTIVITY
    dta.index = pandas.Index(dates_from_range('1700', '2008'))
    arma_mod20 = ARMA(dta, (2,0)).fit(disp=-1)
    # must not raise with both endpoints defaulted
    arma_mod20.predict(None, None)

    # test prediction with time stamp, see #2587
    predict = arma_mod20.predict(dta.index[-20], dta.index[-1])
    assert_(predict.index.equals(dta.index[-20:]))
    predict = arma_mod20.predict(dta.index[-20], dta.index[-1], dynamic=True)
    assert_(predict.index.equals(dta.index[-20:]))

    # partially out of sample
    predict_dates = pandas.Index(dates_from_range('2000', '2015'))
    predict = arma_mod20.predict(predict_dates[0], predict_dates[-1])
    assert_(predict.index.equals(predict_dates))
def test_arima_predict_q2():
    """Regression test: forecasting used to be wrong for MA order q > 1."""
    inv = load_macrodata().data['realinv']
    res = ARIMA(np.log(inv), (1,1,2)).fit(start_params=[0,0,0,0], disp=-1)
    forecast, _stderr, _conf_int = res.forecast(5)
    # reference values copy-pasted from gretl
    expected = [7.306320, 7.313825, 7.321749, 7.329827, 7.337962]
    assert_almost_equal(forecast, expected, 5)
def test_arima_predict_pandas_nofreq():
    # this is issue 712
    # The index below has duplicate dates and no inferable frequency, so
    # predictions must be anchored by position/label on the index itself.
    from pandas import DataFrame
    dates = ["2010-01-04", "2010-01-05", "2010-01-06", "2010-01-07",
             "2010-01-08", "2010-01-11", "2010-01-12", "2010-01-11",
             "2010-01-12", "2010-01-13", "2010-01-17"]
    close = [626.75, 623.99, 608.26, 594.1, 602.02, 601.11, 590.48, 587.09,
             589.85, 580.0,587.62]
    data = DataFrame(close, index=DatetimeIndex(dates), columns=["close"])

    #TODO: fix this names bug for non-string names names
    arma = ARMA(data, order=(1,0)).fit(disp=-1)

    # first check that in-sample prediction works
    predict = arma.predict()
    assert_(predict.index.equals(data.index))

    # check that this raises an exception when date not on index
    assert_raises(ValueError, arma.predict, start="2010-1-9", end=10)
    assert_raises(ValueError, arma.predict, start="2010-1-9", end="2010-1-17")

    # raise because end not on index
    assert_raises(ValueError, arma.predict, start="2010-1-4", end="2010-1-10")
    # raise because end not on index
    assert_raises(ValueError, arma.predict, start=3, end="2010-1-10")

    predict = arma.predict(start="2010-1-7", end=10)  # should be of length 10
    assert_(len(predict) == 8)
    assert_(predict.index.equals(data.index[3:10+1]))

    # out-of-sample labels fall back to an integer index
    predict = arma.predict(start="2010-1-7", end=14)
    assert_(predict.index.equals(pandas.Index(lrange(3, 15))))

    predict = arma.predict(start=3, end=14)
    assert_(predict.index.equals(pandas.Index(lrange(3, 15))))

    # end can be a date if it's in the sample and on the index
    # predict dates is just a slice of the dates index then
    predict = arma.predict(start="2010-1-6", end="2010-1-13")
    assert_(predict.index.equals(data.index[2:10]))
    predict = arma.predict(start=2, end="2010-1-13")
    assert_(predict.index.equals(data.index[2:10]))
def test_arima_predict_exog():
    # check 625 and 626
    # The commented-out block documents how the fixture CSV was generated.
    #from statsmodels.tsa.arima_process import arma_generate_sample
    #arparams = np.array([1, -.45, .25])
    #maparams = np.array([1, .15])
    #nobs = 100
    #np.random.seed(123)
    #y = arma_generate_sample(arparams, maparams, nobs, burnin=100)
    ## make an exogenous trend
    #X = np.array(lrange(nobs)) / 20.0
    ## add a constant
    #y += 2.5

    from pandas import read_csv
    arima_forecasts = read_csv(current_path + "/results/"
                               "results_arima_exog_forecasts_mle.csv")
    y = arima_forecasts["y"].dropna()
    X = np.arange(len(y) + 25)/20.
    predict_expected = arima_forecasts["predict"]
    arma_res = ARMA(y.values, order=(2,1), exog=X[:100]).fit(trend="c",
                                                             disp=-1)
    # params from gretl
    params = np.array([2.786912485145725, -0.122650190196475,
                       0.533223846028938, -0.319344321763337,
                       0.132883233000064])
    assert_almost_equal(arma_res.params, params, 5)
    # no exog for in-sample
    predict = arma_res.predict()
    assert_almost_equal(predict, predict_expected.values[:100], 5)

    # check 626
    assert_(len(arma_res.model.exog_names) == 5)

    # exog for out-of-sample and in-sample dynamic
    predict = arma_res.model.predict(params, end=124, exog=X[100:])
    assert_almost_equal(predict, predict_expected.values, 6)

    # conditional sum of squares
    #arima_forecasts = read_csv(current_path + "/results/"
    #                           "results_arima_exog_forecasts_css.csv")
    #predict_expected = arima_forecasts["predict"].dropna()
    #arma_res = ARMA(y.values, order=(2,1), exog=X[:100]).fit(trend="c",
    #                                                         method="css",
    #                                                         disp=-1)
    #params = np.array([2.152350033809826, -0.103602399018814,
    #                   0.566716580421188, -0.326208009247944,
    #                   0.102142932143421])
    #predict = arma_res.model.predict(params)
    ## in-sample
    #assert_almost_equal(predict, predict_expected.values[:98], 6)
    #predict = arma_res.model.predict(params, end=124, exog=X[100:])
    ## exog for out-of-sample and in-sample dynamic
    #assert_almost_equal(predict, predict_expected.values, 3)
def test_arima_no_diff():
    """Issue 736: ARIMA(p, 0, q) must return an ARMA instance so that
    prediction does not break on an ARIMAResults/ARMA mismatch."""
    ar = [1, -.75, .15, .35]
    ma = [1, .25, .9]
    sample = arma_generate_sample(ar, ma, 100)
    model = ARIMA(sample, (3, 0, 2))
    assert_(type(model) is ARMA)
    res = model.fit(disp=-1)
    # smoke test just to be sure
    res.predict()
def test_arima_predict_noma():
    """Issue 657 smoke test: forecasting from an ARMA(0, 1) model."""
    ar = [1, .75]
    ma = [1]
    sample = arma_generate_sample(ar, ma, 100)
    res = ARMA(sample, order=(0,1)).fit(disp=-1)
    res.forecast(1)
def test_arimax():
    # ARIMA with exogenous regressors, checked against gretl/Stata output.
    dta = load_macrodata_pandas().data
    # NOTE(review): ``dates`` is unused; the module-level ``cpi_dates`` is
    # assigned to the index instead.
    dates = dates_from_range("1959Q1", length=len(dta))
    dta.index = cpi_dates
    dta = dta[["realdpi", "m1", "realgdp"]]
    y = dta.pop("realdpi")

    # 1 exog
    #X = dta.ix[1:]["m1"]
    #res = ARIMA(y, (2, 1, 1), X).fit(disp=-1)
    #params = [23.902305009084373, 0.024650911502790, -0.162140641341602,
    #          0.165262136028113, -0.066667022903974]
    #assert_almost_equal(res.params.values, params, 6)

    # 2 exog
    X = dta
    res = ARIMA(y, (2, 1, 1), X).fit(disp=False, solver="nm", maxiter=1000,
                                     ftol=1e-12, xtol=1e-12)

    # from gretl
    #params = [13.113976653926638, -0.003792125069387, 0.004123504809217,
    #          -0.199213760940898, 0.151563643588008, -0.033088661096699]
    # from stata using double
    stata_llf = -1076.108614859121
    params = [13.1259220104, -0.00376814509403812, 0.00411970083135622,
              -0.19921477896158524, 0.15154396192855729, -0.03308400760360837]
    # we can get close
    assert_almost_equal(res.params.values, params, 4)

    # This shows that it's an optimizer problem and not a problem in the code
    assert_almost_equal(res.model.loglike(np.array(params)), stata_llf, 6)

    # same model with differenced exog (Stata's convention)
    X = dta.diff()
    X.iloc[0] = 0
    res = ARIMA(y, (2, 1, 1), X).fit(disp=False)

    # gretl won't estimate this - looks like maybe a bug on their part,
    # but we can just fine, we're close to Stata's answer
    # from Stata
    params = [19.5656863783347, 0.32653841355833396198,
              0.36286527042965188716, -1.01133792126884,
              -0.15722368379307766206, 0.69359822544092153418]
    assert_almost_equal(res.params.values, params, 3)
def test_bad_start_params():
    """Fitting must raise ValueError when the default starting-parameter
    regression yields invalid (non-stationary / non-invertible) values."""
    endog = np.array([820.69093, 781.0103028, 785.8786988, 767.64282267,
                      778.9837648 , 824.6595702 , 813.01877867, 751.65598567,
                      753.431091 , 746.920813 , 795.6201904 , 772.65732833,
                      793.4486454 , 868.8457766 , 823.07226547, 783.09067747,
                      791.50723847, 770.93086347, 835.34157333, 810.64147947,
                      738.36071367, 776.49038513, 822.93272333, 815.26461227,
                      773.70552987, 777.3726522 , 811.83444853, 840.95489133,
                      777.51031933, 745.90077307, 806.95113093, 805.77521973,
                      756.70927733, 749.89091773, 1694.2266924 , 2398.4802244 ,
                      1434.6728516 , 909.73940427, 929.01291907, 769.07561453,
                      801.1112548 , 796.16163313, 817.2496376 , 857.73046447,
                      838.849345 , 761.92338873, 731.7842242 , 770.4641844 ])
    # AR(15) on these 48 observations produces bad start params.
    mod = ARMA(endog, (15, 0))
    assert_raises(ValueError, mod.fit)

    inv = load_macrodata().data['realinv']
    arima_mod = ARIMA(np.log(inv), (1, 1, 2))
    # BUG FIX: the original re-asserted on ``mod`` here; the ARIMA model
    # built on the line above is the one whose default start params fail.
    assert_raises(ValueError, arima_mod.fit)
def test_arima_small_data_bug():
    """Issue 1038: too few observations for the requested order must raise
    ValueError rather than failing deep inside estimation."""
    # (removed an unused ``from datetime import datetime``)
    import statsmodels.api as sm
    vals = [96.2, 98.3, 99.1, 95.5, 94.0, 87.1, 87.9, 86.7402777504474]
    dr = dates_from_range("1990q1", length=len(vals))
    ts = pandas.TimeSeries(vals, index=dr)
    df = pandas.DataFrame(ts)
    mod = sm.tsa.ARIMA(df, (2, 0, 2))
    assert_raises(ValueError, mod.fit)
def test_arima_dataframe_integer_name():
    """Smoke test for issue 1038: ARIMA on a DataFrame whose single column
    has an integer (non-string) name must construct without error."""
    # (removed an unused ``from datetime import datetime``)
    import statsmodels.api as sm
    vals = [96.2, 98.3, 99.1, 95.5, 94.0, 87.1, 87.9, 86.7402777504474,
            94.0, 96.5, 93.3, 97.5, 96.3, 92.]
    dr = dates_from_range("1990q1", length=len(vals))
    ts = pandas.TimeSeries(vals, index=dr)
    df = pandas.DataFrame(ts)  # the column name defaults to the integer 0
    mod = sm.tsa.ARIMA(df, (2, 0, 2))  # construction itself is the test
def test_arima_exog_predict_1d():
    """Issue 1067: forecasting with a 1-D exog array must work."""
    np.random.seed(12345)
    endog = np.random.random(100)
    exog = np.random.random(100)
    fitted = ARMA(endog, (2, 1), exog).fit(disp=-1)
    new_exog = np.random.random(10)
    results = fitted.forecast(steps=10, alpha=0.05, exog=new_exog)
def test_arima_1123():
    # test ARMAX predict when trend is none
    np.random.seed(12345)
    arparams = np.array([.75, -.25])
    maparams = np.array([.65, .35])

    # NOTE(review): arparam/maparam and ``dates`` are computed but unused.
    arparam = np.r_[1, -arparams]
    maparam = np.r_[1, maparams]

    nobs = 20
    dates = dates_from_range('1980',length=nobs)

    y = arma_generate_sample(arparams, maparams, nobs)

    X = np.random.randn(nobs)
    y += 5*X
    mod = ARMA(y[:-1], order=(1,0), exog=X[:-1])
    res = mod.fit(trend='nc', disp=False)
    fc = res.forecast(exog=X[-1:])
    # results from gretl: forecast, stderr, conf-int bounds
    assert_almost_equal(fc[0], 2.200393, 6)
    assert_almost_equal(fc[1], 1.030743, 6)
    assert_almost_equal(fc[2][0,0], 0.180175, 6)
    assert_almost_equal(fc[2][0,1], 4.220611, 6)

    mod = ARMA(y[:-1], order=(1,1), exog=X[:-1])
    res = mod.fit(trend='nc', disp=False)
    fc = res.forecast(exog=X[-1:])
    assert_almost_equal(fc[0], 2.765688, 6)
    assert_almost_equal(fc[1], 0.835048, 6)
    assert_almost_equal(fc[2][0,0], 1.129023, 6)
    assert_almost_equal(fc[2][0,1], 4.402353, 6)

    # make sure this works to. code looked fishy.
    mod = ARMA(y[:-1], order=(1,0), exog=X[:-1])
    res = mod.fit(trend='c', disp=False)
    fc = res.forecast(exog=X[-1:])
    assert_almost_equal(fc[0], 2.481219, 6)
    assert_almost_equal(fc[1], 0.968759, 6)
    assert_almost_equal(fc[2][0], [0.582485, 4.379952], 6)
def test_small_data():
    # 1146
    y = [-1214.360173, -1848.209905, -2100.918158, -3647.483678, -4711.186773]

    # refuse to estimate these
    assert_raises(ValueError, ARIMA, y, (2, 0, 3))
    assert_raises(ValueError, ARIMA, y, (1, 1, 3))
    mod = ARIMA(y, (1, 0, 3))
    assert_raises(ValueError, mod.fit, trend="c")

    # try to estimate these...leave it up to the user to check for garbage
    # and be clear, these are garbage parameters.
    # X-12 arima will estimate, gretl refuses to estimate likely a problem
    # in start params regression.
    res = mod.fit(trend="nc", disp=0, start_params=[.1,.1,.1,.1])

    mod = ARIMA(y, (1, 0, 2))
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
class TestARMA00(TestCase):
    """Tests for the degenerate ARMA(0, 0) model, i.e. a mean-only model."""

    @classmethod
    def setup_class(cls):
        from statsmodels.datasets.sunspots import load
        sunspots = load().data['SUNACTIVITY']
        cls.y = y = sunspots
        cls.arma_00_model = ARMA(y, order=(0, 0))
        cls.arma_00_res = cls.arma_00_model.fit(disp=-1)

    def test_parameters(self):
        # The only parameter of ARMA(0, 0) with a constant is the mean.
        params = self.arma_00_res.params
        assert_almost_equal(self.y.mean(), params)

    def test_predictions(self):
        # Every in-sample prediction equals the sample mean.
        predictions = self.arma_00_res.predict()
        assert_almost_equal(self.y.mean() * np.ones_like(predictions),
                            predictions)

    @nottest
    def test_information_criteria(self):
        # This test is invalid since the ICs differ due to df_model differences
        # between OLS and ARIMA
        res = self.arma_00_res
        y = self.y
        ols_res = OLS(y, np.ones_like(y)).fit(disp=-1)
        ols_ic = np.array([ols_res.aic, ols_res.bic])
        arma_ic = np.array([res.aic, res.bic])
        assert_almost_equal(ols_ic, arma_ic, DECIMAL_4)

    def test_arma_00_nc(self):
        # ARMA(0, 0) without a constant has nothing to estimate.
        arma_00 = ARMA(self.y, order=(0, 0))
        assert_raises(ValueError, arma_00.fit, trend='nc', disp=-1)

    def test_css(self):
        # css estimation of ARMA(0, 0) also reduces to the sample mean.
        arma = ARMA(self.y, order=(0, 0))
        fit = arma.fit(method='css', disp=-1)
        predictions = fit.predict()
        assert_almost_equal(self.y.mean() * np.ones_like(predictions),
                            predictions)

    def test_arima(self):
        # ARIMA(0, 1, 0) on the cumsum recovers the mean of the differences.
        yi = np.cumsum(self.y)
        arima = ARIMA(yi, order=(0, 1, 0))
        fit = arima.fit(disp=-1)
        assert_almost_equal(np.diff(yi).mean(), fit.params, DECIMAL_4)

    def test_arma_ols(self):
        # ARMA(0, 0) with a lagged exog and constant is equivalent to OLS.
        y = self.y
        y_lead = y[1:]
        y_lag = y[:-1]
        T = y_lag.shape[0]
        X = np.hstack((np.ones((T, 1)), y_lag[:, None]))
        ols_res = OLS(y_lead, X).fit()
        arma_res = ARMA(y_lead, order=(0, 0), exog=y_lag).fit(trend='c',
                                                              disp=-1)
        assert_almost_equal(ols_res.params, arma_res.params)

    def test_arma_exog_no_constant(self):
        # Same as test_arma_ols but without the constant term.
        y = self.y
        y_lead = y[1:]
        y_lag = y[:-1]
        X = y_lag[:, None]
        ols_res = OLS(y_lead, X).fit()
        arma_res = ARMA(y_lead, order=(0, 0), exog=y_lag).fit(trend='nc',
                                                              disp=-1)
        assert_almost_equal(ols_res.params, arma_res.params)
        # (removed a stray dead ``pass`` that followed this assertion)
def test_arima_dates_startatend():
    """A one-step prediction that starts exactly at the end of the sample
    must align with the shifted index and agree with ``forecast``."""
    np.random.seed(18)
    x = pandas.TimeSeries(np.random.random(36),
                          index=pandas.DatetimeIndex(start='1/1/1990',
                                                     periods=36, freq='M'))
    res = ARIMA(x, (1, 0, 0)).fit(disp=0)
    one_ahead = res.predict(start=len(x), end=len(x))
    # the single predicted date is the first date after the sample
    assert_(one_ahead.index[0] == x.index.shift(1)[-1])
    # and its value matches forecast()
    fc = res.forecast()[0]
    assert_almost_equal(one_ahead.values[0], fc)
def test_arma_missing():
    """Bug 1343: missing='raise' must reject NaN values in the endog."""
    from statsmodels.base.data import MissingDataError
    series = np.random.random(40)
    series[-1] = np.nan
    assert_raises(MissingDataError, ARMA, series, (1, 0), missing='raise')
@dec.skipif(not have_matplotlib)
def test_plot_predict():
    # Smoke test: plot_predict must return a figure for both ARMA and ARIMA.
    from statsmodels.datasets.sunspots import load_pandas
    dta = load_pandas().data[['SUNACTIVITY']]
    dta.index = DatetimeIndex(start='1700', end='2009', freq='A')
    res = ARMA(dta, (3, 0)).fit(disp=-1)
    fig = res.plot_predict('1990', '2012', dynamic=True, plot_insample=False)
    plt.close(fig)

    res = ARIMA(dta, (3, 1, 0)).fit(disp=-1)
    fig = res.plot_predict('1990', '2012', dynamic=True, plot_insample=False)
    plt.close(fig)
def test_arima_diff2():
    # ARIMA with d=2; forecasts and levels-predictions checked against gretl.
    dta = load_macrodata_pandas().data['cpi']
    # NOTE(review): ``dates`` is unused; the module-level ``cpi_dates`` is
    # assigned to the index instead.
    dates = dates_from_range("1959Q1", length=len(dta))
    dta.index = cpi_dates
    mod = ARIMA(dta, (3, 2, 1)).fit(disp=-1)
    fc, fcerr, conf_int = mod.forecast(10)
    # forecasts from gretl
    conf_int_res = [ (216.139, 219.231),
                     (216.472, 221.520),
                     (217.064, 223.649),
                     (217.586, 225.727),
                     (218.119, 227.770),
                     (218.703, 229.784),
                     (219.306, 231.777),
                     (219.924, 233.759),
                     (220.559, 235.735),
                     (221.206, 237.709)]

    fc_res = [217.685, 218.996, 220.356, 221.656, 222.945, 224.243, 225.541,
              226.841, 228.147, 229.457]
    fcerr_res = [0.7888, 1.2878, 1.6798, 2.0768, 2.4620, 2.8269, 3.1816,
                 3.52950, 3.8715, 4.2099]

    assert_almost_equal(fc, fc_res, 3)
    assert_almost_equal(fcerr, fcerr_res, 3)
    assert_almost_equal(conf_int, conf_int_res, 3)

    # predictions on the level scale (typ='levels') over a date range
    predicted = mod.predict('2008Q1', '2012Q1', typ='levels')

    predicted_res = [214.464, 215.478, 221.277, 217.453, 212.419, 213.530,
                     215.087, 217.685 , 218.996 , 220.356 , 221.656 ,
                     222.945 , 224.243 , 225.541 , 226.841 , 228.147 ,
                     229.457]
    assert_almost_equal(predicted, predicted_res, 3)
def test_arima111_predict_exog_2127():
    # regression test for issue #2127
    ef = [ 0.03005, 0.03917, 0.02828, 0.03644, 0.03379, 0.02744,
           0.03343, 0.02621, 0.0305 , 0.02455, 0.03261, 0.03507,
           0.02734, 0.05373, 0.02677, 0.03443, 0.03331, 0.02741,
           0.03709, 0.02113, 0.03343, 0.02011, 0.03675, 0.03077,
           0.02201, 0.04844, 0.05518, 0.03765, 0.05433, 0.03049,
           0.04829, 0.02936, 0.04421, 0.02457, 0.04007, 0.03009,
           0.04504, 0.05041, 0.03651, 0.02719, 0.04383, 0.02887,
           0.0344 , 0.03348, 0.02364, 0.03496, 0.02549, 0.03284,
           0.03523, 0.02579, 0.0308 , 0.01784, 0.03237, 0.02078,
           0.03508, 0.03062, 0.02006, 0.02341, 0.02223, 0.03145,
           0.03081, 0.0252 , 0.02683, 0.0172 , 0.02225, 0.01579,
           0.02237, 0.02295, 0.0183 , 0.02356, 0.02051, 0.02932,
           0.03025, 0.0239 , 0.02635, 0.01863, 0.02994, 0.01762,
           0.02837, 0.02421, 0.01951, 0.02149, 0.02079, 0.02528,
           0.02575, 0.01634, 0.02563, 0.01719, 0.02915, 0.01724,
           0.02804, 0.0275 , 0.02099, 0.02522, 0.02422, 0.03254,
           0.02095, 0.03241, 0.01867, 0.03998, 0.02212, 0.03034,
           0.03419, 0.01866, 0.02623, 0.02052]
    ue = [ 4.9, 5. , 5. , 5. , 4.9, 4.7, 4.8, 4.7, 4.7,
           4.6, 4.6, 4.7, 4.7, 4.5, 4.4, 4.5, 4.4, 4.6,
           4.5, 4.4, 4.5, 4.4, 4.6, 4.7, 4.6, 4.7, 4.7,
           4.7, 5. , 5. , 4.9, 5.1, 5. , 5.4, 5.6, 5.8,
           6.1, 6.1, 6.5, 6.8, 7.3, 7.8, 8.3, 8.7, 9. ,
           9.4, 9.5, 9.5, 9.6, 9.8, 10. , 9.9, 9.9, 9.7,
           9.8, 9.9, 9.9, 9.6, 9.4, 9.5, 9.5, 9.5, 9.5,
           9.8, 9.4, 9.1, 9. , 9. , 9.1, 9. , 9.1, 9. ,
           9. , 9. , 8.8, 8.6, 8.5, 8.2, 8.3, 8.2, 8.2,
           8.2, 8.2, 8.2, 8.1, 7.8, 7.8, 7.8, 7.9, 7.9,
           7.7, 7.5, 7.5, 7.5, 7.5, 7.3, 7.2, 7.2, 7.2,
           7. , 6.7, 6.6, 6.7, 6.7, 6.3, 6.3]
    # rescaling results in convergence failure
    #model = sm.tsa.ARIMA(np.array(ef)*100, (1,1,1), exog=ue)
    model = ARIMA(ef, (1,1,1), exog=ue)
    res = model.fit(transparams=False, iprint=0, disp=0)

    # out-of-sample levels prediction with exogenous regressors
    predicts = res.predict(start=len(ef), end = len(ef)+10,
                           exog=ue[-11:], typ = 'levels')

    # regression test, not verified numbers
    # if exog=ue in predict, which values are used ?
    # NOTE(review): this first assignment is deliberately overwritten below;
    # it is kept for documentation of the alternative exog alignment.
    predicts_res = np.array(
        [ 0.02612291, 0.02361929, 0.024966 , 0.02448193, 0.0248772 ,
          0.0248762 , 0.02506319, 0.02516542, 0.02531214, 0.02544654,
          0.02559099, 0.02550931])

    # if exog=ue[-11:] in predict
    predicts_res = np.array(
        [ 0.02591112, 0.02321336, 0.02436593, 0.02368773, 0.02389767,
          0.02372018, 0.02374833, 0.02367407, 0.0236443 , 0.02362868,
          0.02362312])

    assert_allclose(predicts, predicts_res, atol=1e-6)
def test_ARIMA_exog_predict():
    # test forecasting and dynamic prediction with exog against Stata
    dta = load_macrodata_pandas().data
    # NOTE(review): ``dates`` is unused; ``cpi_dates`` (shadowing the module
    # level name) provides the index.
    dates = dates_from_range("1959Q1", length=len(dta))
    cpi_dates = dates_from_range('1959Q1', '2009Q3')
    dta.index = cpi_dates
    data = dta
    data['loginv'] = np.log(data['realinv'])
    data['loggdp'] = np.log(data['realgdp'])
    data['logcons'] = np.log(data['realcons'])

    # estimation sample ends right before the forecast period
    forecast_period = dates_from_range('2008Q2', '2009Q3')
    end = forecast_period[0]
    data_sample = data.ix[dta.index < end]

    exog_full = data[['loggdp', 'logcons']]

    # pandas
    mod = ARIMA(data_sample['loginv'], (1,0,1),
                exog=data_sample[['loggdp', 'logcons']])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = mod.fit(disp=0, solver='bfgs', maxiter=5000)

    predicted_arma_fp = res.predict(start=197, end=202,
                                    exog=exog_full.values[197:]).values
    predicted_arma_dp = res.predict(start=193, end=202,
                                    exog=exog_full[197:], dynamic=True)

    # numpy
    mod2 = ARIMA(np.asarray(data_sample['loginv']), (1,0,1),
                 exog=np.asarray(data_sample[['loggdp', 'logcons']]))
    res2 = mod2.fit(start_params=res.params, disp=0, solver='bfgs',
                    maxiter=5000)

    exog_full = data[['loggdp', 'logcons']]
    predicted_arma_f = res2.predict(start=197, end=202,
                                    exog=exog_full.values[197:])
    predicted_arma_d = res2.predict(start=193, end=202,
                                    exog=exog_full[197:], dynamic=True)

    #ARIMA(1, 1, 1)
    ex = np.asarray(data_sample[['loggdp', 'logcons']].diff())
    # The first obsevation is not (supposed to be) used, but I get a Lapack problem
    # Intel MKL ERROR: Parameter 5 was incorrect on entry to DLASCL.
    ex[0] = 0
    mod111 = ARIMA(np.asarray(data_sample['loginv']), (1,1,1),
                   # Stata differences also the exog
                   exog=ex)
    res111 = mod111.fit(disp=0, solver='bfgs', maxiter=5000)
    exog_full_d = data[['loggdp', 'logcons']].diff()
    res111.predict(start=197, end=202, exog=exog_full_d.values[197:])

    predicted_arima_f = res111.predict(start=196, end=202,
                                       exog=exog_full_d.values[197:],
                                       typ='levels')
    predicted_arima_d = res111.predict(start=193, end=202,
                                       exog=exog_full_d.values[197:],
                                       typ='levels', dynamic=True)

    # reference values from Stata: f = forecast, d = dynamic prediction,
    # 101/111 = (p, d, q) orders of the models above
    res_f101 = np.array([ 7.73975859954, 7.71660108543, 7.69808978329, 7.70872117504,
                          7.6518392758 , 7.69784279784, 7.70290907856, 7.69237782644,
                          7.65017785174, 7.66061689028, 7.65980022857, 7.61505314129,
                          7.51697158428, 7.5165760663 , 7.5271053284 ])
    res_f111 = np.array([ 7.74460013693, 7.71958207517, 7.69629561172, 7.71208186737,
                          7.65758850178, 7.69223472572, 7.70411775588, 7.68896109499,
                          7.64016249001, 7.64871881901, 7.62550283402, 7.55814609462,
                          7.44431310053, 7.42963968062, 7.43554675427])
    res_d111 = np.array([ 7.74460013693, 7.71958207517, 7.69629561172, 7.71208186737,
                          7.65758850178, 7.69223472572, 7.71870821151, 7.7299430215 ,
                          7.71439447355, 7.72544001101, 7.70521902623, 7.64020040524,
                          7.5281927191 , 7.5149442694 , 7.52196378005])
    res_d101 = np.array([ 7.73975859954, 7.71660108543, 7.69808978329, 7.70872117504,
                          7.6518392758 , 7.69784279784, 7.72522142662, 7.73962377858,
                          7.73245950636, 7.74935432862, 7.74449584691, 7.69589103679,
                          7.5941274688 , 7.59021764836, 7.59739267775])

    assert_allclose(predicted_arma_dp, res_d101[-len(predicted_arma_d):], atol=1e-4)
    assert_allclose(predicted_arma_fp, res_f101[-len(predicted_arma_f):], atol=1e-4)
    assert_allclose(predicted_arma_d, res_d101[-len(predicted_arma_d):], atol=1e-4)
    assert_allclose(predicted_arma_f, res_f101[-len(predicted_arma_f):], atol=1e-4)
    assert_allclose(predicted_arima_d, res_d111[-len(predicted_arima_d):], rtol=1e-4, atol=1e-4)
    assert_allclose(predicted_arima_f, res_f111[-len(predicted_arima_f):], rtol=1e-4, atol=1e-4)

    # test for forecast with 0 ar fix in #2457 numbers again from Stata
    res_f002 = np.array([ 7.70178181209, 7.67445481224, 7.6715373765 , 7.6772915319 ,
                          7.61173201163, 7.67913499878, 7.6727609212 , 7.66275451925,
                          7.65199799315, 7.65149983741, 7.65554131408, 7.62213286298,
                          7.53795983357, 7.53626130154, 7.54539963934])
    res_d002 = np.array([ 7.70178181209, 7.67445481224, 7.6715373765 , 7.6772915319 ,
                          7.61173201163, 7.67913499878, 7.67306697759, 7.65287924998,
                          7.64904451605, 7.66580449603, 7.66252081172, 7.62213286298,
                          7.53795983357, 7.53626130154, 7.54539963934])

    mod_002 = ARIMA(np.asarray(data_sample['loginv']), (0,0,2),
                    exog=np.asarray(data_sample[['loggdp', 'logcons']]))

    # doesn't converge with default starting values
    res_002 = mod_002.fit(start_params=np.concatenate((res.params[[0, 1, 2, 4]], [0])),
                          disp=0, solver='bfgs', maxiter=5000)

    # forecast
    fpredict_002 = res_002.predict(start=197, end=202,
                                   exog=exog_full.values[197:])
    forecast_002 = res_002.forecast(steps=len(exog_full.values[197:]),
                                    exog=exog_full.values[197:])
    forecast_002 = forecast_002[0]  # TODO we are not checking the other results
    assert_allclose(fpredict_002, res_f002[-len(fpredict_002):],
                    rtol=1e-4, atol=1e-6)
    assert_allclose(forecast_002, res_f002[-len(forecast_002):],
                    rtol=1e-4, atol=1e-6)

    # dynamic predict
    dpredict_002 = res_002.predict(start=193, end=202,
                                   exog=exog_full.values[197:],
                                   dynamic=True)
    assert_allclose(dpredict_002, res_d002[-len(dpredict_002):],
                    rtol=1e-4, atol=1e-6)
def test_arima_fit_mutliple_calls():
    # NOTE(review): the "mutliple" typo is kept; renaming would change the
    # collected test name.
    # Fitting twice must not mangle exog_names (trend/lag naming state).
    y = [-1214.360173, -1848.209905, -2100.918158, -3647.483678, -4711.186773]
    mod = ARIMA(y, (1, 0, 2))
    # Make multiple calls to fit
    mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
    assert_equal(mod.exog_names, ['const', 'ar.L1.y', 'ma.L1.y', 'ma.L2.y'])
    mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
    assert_equal(mod.exog_names, ['const', 'ar.L1.y', 'ma.L1.y', 'ma.L2.y'])
if __name__ == "__main__":
    # Run this test module directly under nose; drop into pdb on failure.
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 67 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
    csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
  """Trains, evaluates, and exports a multivariate model.

  Args:
    csv_file_name: CSV with a time column followed by 5 value columns.
    export_directory: Where to write the SavedModel; a temp dir if None.
    training_steps: Number of estimator training steps.

  Returns:
    Tuple of (all_times, all_observations): the observed series followed by
    100 iteratively sampled continuation points.
  """
  estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=[], num_features=5)
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      # Larger window sizes generally produce a better covariance matrix.
      reader, batch_size=4, window_size=64)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  values = [current_state["observed"]]
  times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
  # Export the model so we can do iterative prediction and filtering without
  # reloading model checkpoints.
  if export_directory is None:
    export_directory = tempfile.mkdtemp()
  input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
  export_location = estimator.export_savedmodel(
      export_directory, input_receiver_fn)
  with tf.Graph().as_default():
    numpy.random.seed(1)  # Make the example a bit more deterministic
    with tf.Session() as session:
      signatures = tf.saved_model.loader.load(
          session, [tf.saved_model.tag_constants.SERVING], export_location)
      # Alternate predict -> sample -> filter so each new draw conditions on
      # the previous ones.
      for _ in range(100):
        current_prediction = (
            tf.contrib.timeseries.saved_model_utils.predict_continuation(
                continue_from=current_state, signatures=signatures,
                session=session, steps=1))
        next_sample = numpy.random.multivariate_normal(
            # Squeeze out the batch and series length dimensions (both 1).
            mean=numpy.squeeze(current_prediction["mean"], axis=[0, 1]),
            cov=numpy.squeeze(current_prediction["covariance"], axis=[0, 1]))
        # Update model state so that future predictions are conditional on the
        # value we just sampled.
        filtering_features = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
                tf.contrib.timeseries.FilteringResults.TIMES],
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
                None, None, :]}
        current_state = (
            tf.contrib.timeseries.saved_model_utils.filter_continuation(
                continue_from=current_state,
                session=session,
                signatures=signatures,
                features=filtering_features))
        values.append(next_sample[None, None, :])
        times.append(current_state["times"])
  all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
  all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
  return all_times, all_observations
def main(unused_argv):
  """Entry point: sample from a trained model and plot the draws."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  sample_times, samples = multivariate_train_and_sample()
  # Show where sampling starts on the plot
  pyplot.axvline(1000, linestyle="dotted")
  pyplot.plot(sample_times, samples)
  pyplot.show()
if __name__ == "__main__":
  # Delegate flag parsing and entry to the TensorFlow app runner.
  tf.app.run(main=main)
| apache-2.0 |
qifeigit/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve"""
    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        # remember the training subset so score() can tell train from test
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        fraction = float(self.train_sizes) / self.n_max_train_sizes
        return 2. - fraction if self._is_training_data(X) else fraction

    def _is_training_data(self, X):
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit"""
    def __init__(self, n_max_train_sizes):
        super(MockIncrementalImprovingEstimator, self).__init__(
            n_max_train_sizes)
        self.x = None

    def _is_training_data(self, X):
        # the first sample seen by partial_fit marks the training fold
        return self.x in X

    def partial_fit(self, X, y=None, **params):
        self.train_sizes += X.shape[0]
        self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve"""
    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        # training score is ``param``; test score is its complement
        if self._is_training_data(X):
            return self.param
        return 1 - self.param

    def _is_training_data(self, X):
        return X is self.X_subset
def test_learning_curve():
    # Basic shapes and scores of learning_curve on the improving mock.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    # mock's training score decays 1.9 -> 1.0; test score grows 0.1 -> 1.0
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
    # y=None must be accepted: learning_curve on unlabeled data.
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    mock = MockImprovingEstimator(20)
    sizes, train_scores, test_scores = learning_curve(
        mock, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    # training score decays 1.9 -> 1.0 while test score grows 0.1 -> 1.0
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
    # verbose=1 must print a "[learning_curve]" banner to stdout.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)

    # capture stdout around the call
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout

    assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    clf = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, clf, X, y,
                  exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
    # exploit_incremental_learning uses partial_fit on growing subsets.
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
    # incremental learning must also work with y=None (unsupervised case)
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    # Run the same learning-curve computation through both code paths.
    incremental = learning_curve(
        estimator, X, y, train_sizes=train_sizes,
        cv=3, exploit_incremental_learning=True)
    batch = learning_curve(
        estimator, X, y, cv=3, train_sizes=train_sizes,
        exploit_incremental_learning=False)
    sizes_inc, train_inc, test_inc = incremental
    sizes_batch, train_batch, test_batch = batch
    assert_array_equal(sizes_inc, sizes_batch)
    assert_array_almost_equal(train_inc.mean(axis=1),
                              train_batch.mean(axis=1))
    assert_array_almost_equal(test_inc.mean(axis=1),
                              test_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # Each specification falls outside the valid range of train sizes
    # (fractions in (0, 1] or absolute counts in [1, n_max]) and must be
    # rejected with a ValueError.
    for bad_sizes in ([0, 1], [0.0, 1.0], [0.1, 1.1], [0, 20], [1, 21]):
        assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                      train_sizes=bad_sizes)
def test_learning_curve_remove_duplicate_sample_sizes():
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    # Three requested fractions of only 3 samples collapse to two unique
    # absolute sizes; learning_curve must warn and deduplicate.
    sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    # An explicit KFold object yields boolean masks rather than index arrays.
    cv = KFold(n=30, n_folds=3)
    sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
def test_validation_curve():
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    param_range = np.linspace(0, 1, 10)
    # The computation must complete without emitting any warning.
    with warnings.catch_warnings(record=True) as recorded:
        train_scores, test_scores = validation_curve(
            MockEstimatorWithParameter(), X, y, param_name="param",
            param_range=param_range, cv=2
        )
    if len(recorded) > 0:
        raise RuntimeError("Unexpected warning: %r" % recorded[0].message)
    assert_array_almost_equal(train_scores.mean(axis=1), param_range)
    assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
aalmah/pylearn2 | pylearn2/models/tests/test_svm.py | 28 | 1398 | """Tests for DenseMulticlassSVM"""
from __future__ import print_function
from pylearn2.datasets.mnist import MNIST
from pylearn2.testing.skip import skip_if_no_sklearn, skip_if_no_data
import numpy as np
from theano.compat.six.moves import xrange
import unittest
DenseMulticlassSVM = None
class TestSVM(unittest.TestCase):
    """Unit tests for :class:`DenseMulticlassSVM`."""

    def setUp(self):
        """Import DenseMulticlassSVM lazily, skipping when unavailable.

        The import is deferred so that the sklearn / data availability
        checks can skip the test before the import is attempted.
        """
        global DenseMulticlassSVM
        skip_if_no_sklearn()
        skip_if_no_data()
        import pylearn2.models.svm
        DenseMulticlassSVM = pylearn2.models.svm.DenseMulticlassSVM

    def test_decision_function(self):
        """Check that argmax of decision_function matches predict()."""
        dataset = MNIST(which_set='train')
        features = dataset.X[0:20, :]
        labels = dataset.y[0:20]
        # Every digit class must be represented in the 20-sample subset.
        for digit in xrange(10):
            assert (labels == digit).sum() > 0
        model = DenseMulticlassSVM(kernel='poly', C=1.0).fit(features, labels)
        scores = model.decision_function(features)
        print(scores)
        argmax_labels = np.argmax(scores, axis=1)
        predicted = np.cast[argmax_labels.dtype](model.predict(features))
        print(argmax_labels)
        print(predicted)
        assert (argmax_labels != predicted).sum() == 0
| bsd-3-clause |
wlamond/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 105 | 4300 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Score pairs of random uniform cluster labelings.

    For each cluster count in ``n_clusters_range``, ``n_runs`` pairs of
    labelings are scored with ``score_func``. When ``fixed_n_classes`` is
    given, the first labeling is drawn once (with that many classes) and
    treated as a fixed ground truth for every run.

    Returns an array of shape ``(len(n_clusters_range), n_runs)``.
    """
    draw_labels = np.random.RandomState(seed).randint
    result = np.zeros((len(n_clusters_range), n_runs))
    # Fixed ground truth: drawn a single time, reused for all runs.
    if fixed_n_classes is not None:
        reference = draw_labels(low=0, high=fixed_n_classes, size=n_samples)
    for row, n_clusters in enumerate(n_clusters_range):
        for run in range(n_runs):
            if fixed_n_classes is None:
                reference = draw_labels(low=0, high=n_clusters,
                                        size=n_samples)
            candidate = draw_labels(low=0, high=n_clusters, size=n_samples)
            result[row, run] = score_func(reference, candidate)
    return result
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]

# 2 independent random clusterings with equal cluster number
n_samples = 100
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented replacement.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
# BUG FIX: plt.ylim's ymin/ymax keyword names were removed in
# Matplotlib 3.0; bottom/top are the supported names.
plt.ylim(bottom=-0.05, top=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented replacement.
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
# BUG FIX: plt.ylim's ymin/ymax keyword names were removed in
# Matplotlib 3.0; bottom/top are the supported names.
plt.ylim(bottom=-0.05, top=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
ilayn/scipy | scipy/signal/ltisys.py | 12 | 128865 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant:
    """Shared base class of the continuous (`lti`) and discrete (`dlti`)
    system hierarchies; not instantiable directly."""

    def __new__(cls, *system, **kwargs):
        """Create a new object, refusing direct instantiation of this base."""
        if cls is LinearTimeInvariant:
            raise NotImplementedError('The LinearTimeInvariant class is not '
                                      'meant to be used directly, use `lti` '
                                      'or `dlti` instead.')
        return super(LinearTimeInvariant, cls).__new__(cls)

    def __init__(self):
        """Set up the common attributes; subclasses fill in the details."""
        super().__init__()
        self.inputs = None
        self.outputs = None
        self._dt = None

    @property
    def dt(self):
        """Sampling time of the system; `None` for `lti` systems."""
        return self._dt

    @property
    def _dt_dict(self):
        """Keyword dict carrying ``dt`` for discrete systems, empty otherwise."""
        return {} if self.dt is None else {'dt': self.dt}

    @property
    def zeros(self):
        """Zeros of the system."""
        return self.to_zpk().zeros

    @property
    def poles(self):
        """Poles of the system."""
        return self.to_zpk().poles

    def _as_ss(self):
        """Return this system as `StateSpace`, converting only if needed.

        Returns
        -------
        sys : StateSpace
            ``self`` when already a `StateSpace`, otherwise ``self.to_ss()``.
        """
        return self if isinstance(self, StateSpace) else self.to_ss()

    def _as_zpk(self):
        """Return this system as `ZerosPolesGain`, converting only if needed.

        Returns
        -------
        sys : ZerosPolesGain
            ``self`` when already a `ZerosPolesGain`, otherwise
            ``self.to_zpk()``.
        """
        return self if isinstance(self, ZerosPolesGain) else self.to_zpk()

    def _as_tf(self):
        """Return this system as `TransferFunction`, converting only if needed.

        Returns
        -------
        sys : TransferFunction
            ``self`` when already a `TransferFunction`, otherwise
            ``self.to_tf()``.
        """
        return self if isinstance(self, TransferFunction) else self.to_tf()
class lti(LinearTimeInvariant):
    r"""
    Continuous-time linear time invariant system base class.

    Parameters
    ----------
    *system : arguments
        `lti` dispatches on the number of arguments to one of its
        subclasses:

        * 2: `TransferFunction`: (numerator, denominator)
        * 3: `ZerosPolesGain`: (zeros, poles, gain)
        * 4: `StateSpace`: (A, B, C, D)

        Each argument can be an array or a sequence.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, dlti

    Notes
    -----
    `lti` instances do not exist directly; creating one returns an
    instance of `StateSpace`, `TransferFunction` or `ZerosPolesGain`.

    For a (numerator, denominator) pair, coefficients are given in
    descending exponent order (e.g., ``s^2 + 3s + 5`` is ``[1, 3, 5]``).

    Reading properties that are not part of the current representation
    (such as the `zeros` of a `StateSpace` system) converts the system on
    every access, which is inefficient and may lose precision; convert
    once with e.g. ``sys = sys.to_zpk()`` first.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.lti([3, 4], [1, 2])
    TransferFunctionContinuous(
    array([3., 4.]),
    array([1., 2.]),
    dt: None
    )
    """

    def __new__(cls, *system):
        """Instantiate the subclass that matches the argument count."""
        if cls is not lti:
            # __new__ was invoked from a subclass; let it do its own work.
            return super(lti, cls).__new__(cls)
        n_args = len(system)
        if n_args == 2:
            return TransferFunctionContinuous.__new__(
                TransferFunctionContinuous, *system)
        if n_args == 3:
            return ZerosPolesGainContinuous.__new__(
                ZerosPolesGainContinuous, *system)
        if n_args == 4:
            return StateSpaceContinuous.__new__(
                StateSpaceContinuous, *system)
        raise ValueError("`system` needs to be an instance of `lti` "
                         "or have 2, 3 or 4 arguments.")

    def __init__(self, *system):
        """Forward initialization to the subclass machinery."""
        super().__init__(*system)

    def impulse(self, X0=None, T=None, N=None):
        """Impulse response of the system; see `impulse` for details."""
        return impulse(self, X0=X0, T=T, N=N)

    def step(self, X0=None, T=None, N=None):
        """Step response of the system; see `step` for details."""
        return step(self, X0=X0, T=T, N=N)

    def output(self, U, T, X0=None):
        """Response of the system to input `U`; see `lsim` for details."""
        return lsim(self, U, T, X0=X0)

    def bode(self, w=None, n=100):
        """
        Bode magnitude and phase data of the system.

        Returns a 3-tuple containing arrays of frequencies [rad/s],
        magnitude [dB] and phase [deg]. See `bode` for details.

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt
        >>> sys = signal.TransferFunction([1], [1, 1])
        >>> w, mag, phase = sys.bode()
        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return bode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000):
        """
        Frequency response of the system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude. See `freqresp` for details.
        """
        return freqresp(self, w=w, n=n)

    def to_discrete(self, dt, method='zoh', alpha=None):
        """Return a discretized version of the current system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys : instance of `dlti`
        """
        # Concrete subclasses are expected to override this.
        raise NotImplementedError('to_discrete is not implemented for this '
                                  'system class.')
class dlti(LinearTimeInvariant):
    r"""
    Discrete-time linear time invariant system base class.

    Parameters
    ----------
    *system : arguments
        `dlti` dispatches on the number of arguments to one of its
        subclasses:

        * 2: `TransferFunction`: (numerator, denominator)
        * 3: `ZerosPolesGain`: (zeros, poles, gain)
        * 4: `StateSpace`: (A, B, C, D)

        Each argument can be an array or a sequence.
    dt : float, optional
        Sampling time [s] of the discrete-time systems. Defaults to
        ``True`` (unspecified sampling time). Must be given as a keyword
        argument, for example ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, lti

    Notes
    -----
    `dlti` instances do not exist directly; creating one returns an
    instance of `StateSpace`, `TransferFunction` or `ZerosPolesGain`.

    For a (numerator, denominator) pair, coefficients are given in
    descending exponent order (e.g., ``z^2 + 3z + 5`` is ``[1, 3, 5]``).

    Reading properties that are not part of the current representation
    converts the system on every access, which is inefficient and may
    lose precision; convert once with e.g. ``sys = sys.to_zpk()`` first.

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal
    >>> signal.dlti([3, 4], [1, 2], dt=0.1)
    TransferFunctionDiscrete(
    array([3., 4.]),
    array([1., 2.]),
    dt: 0.1
    )
    """

    def __new__(cls, *system, **kwargs):
        """Instantiate the subclass that matches the argument count."""
        if cls is not dlti:
            # __new__ was invoked from a subclass; let it do its own work.
            return super(dlti, cls).__new__(cls)
        n_args = len(system)
        if n_args == 2:
            return TransferFunctionDiscrete.__new__(
                TransferFunctionDiscrete, *system, **kwargs)
        if n_args == 3:
            return ZerosPolesGainDiscrete.__new__(
                ZerosPolesGainDiscrete, *system, **kwargs)
        if n_args == 4:
            return StateSpaceDiscrete.__new__(
                StateSpaceDiscrete, *system, **kwargs)
        raise ValueError("`system` needs to be an instance of `dlti` "
                         "or have 2, 3 or 4 arguments.")

    def __init__(self, *system, **kwargs):
        """Record the sampling time and defer the rest to the subclass."""
        dt = kwargs.pop('dt', True)
        super().__init__(*system, **kwargs)
        self.dt = dt

    @property
    def dt(self):
        """Sampling time of the system."""
        return self._dt

    @dt.setter
    def dt(self, dt):
        self._dt = dt

    def impulse(self, x0=None, t=None, n=None):
        """Impulse response of the system; see `dimpulse` for details."""
        return dimpulse(self, x0=x0, t=t, n=n)

    def step(self, x0=None, t=None, n=None):
        """Step response of the system; see `dstep` for details."""
        return dstep(self, x0=x0, t=t, n=n)

    def output(self, u, t, x0=None):
        """Response of the system to input `u`; see `dlsim` for details."""
        return dlsim(self, u, t, x0=x0)

    def bode(self, w=None, n=100):
        r"""
        Bode magnitude and phase data of the system.

        Returns a 3-tuple containing arrays of frequencies [rad/s],
        magnitude [dB] and phase [deg]. See `dbode` for details.

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt
        >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
        >>> w, mag, phase = sys.bode()  # Equivalent: signal.dbode(sys)
        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return dbode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000, whole=False):
        """
        Frequency response of the system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude. See `dfreqresp` for details.
        """
        return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
    r"""Linear Time Invariant system class in transfer function form.

    Represents the system as the continuous-time transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
    discrete-time transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are the numerator coefficients `num`, :math:`a` the
    denominator coefficients `den`, ``N == len(b) - 1`` and
    ``M == len(a) - 1``. `TransferFunction` systems inherit additional
    functionality from `lti` or `dlti` depending on the representation.

    Parameters
    ----------
    *system : arguments
        * 1 argument: an `lti` or `dlti` system to convert
          (`StateSpace`, `TransferFunction` or `ZerosPolesGain`)
        * 2 arguments: (numerator, denominator) array_likes, with
          coefficients in descending exponent order
          (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` is ``[1, 3, 5]``)
    dt : float, optional
        Sampling time [s] of the discrete-time systems. Defaults to
        `None` (continuous-time). Keyword-only, e.g. ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, lti, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Reading properties that are not part of the transfer-function
    representation (such as the `A`, `B`, `C`, `D` state-space matrices)
    converts the system on every access; convert once with
    ``sys = sys.to_ss()`` first.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.TransferFunction([1, 3, 3], [1, 2, 1])
    TransferFunctionContinuous(
    array([1., 3., 3.]),
    array([1., 2., 1.]),
    dt: None
    )
    >>> signal.TransferFunction([1, 3, 3], [1, 2, 1], dt=0.1)
    TransferFunctionDiscrete(
    array([1., 3., 3.]),
    array([1., 2., 1.]),
    dt: 0.1
    )
    """

    def __new__(cls, *system, **kwargs):
        """Convert an LTI argument, else pick the continuous/discrete subclass."""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_tf()
        if cls is TransferFunction:
            # Absence of `dt` (or dt=None) selects continuous time.
            if kwargs.get('dt') is None:
                return TransferFunctionContinuous.__new__(
                    TransferFunctionContinuous, *system, **kwargs)
            return TransferFunctionDiscrete.__new__(
                TransferFunctionDiscrete, *system, **kwargs)
        # No special conversion needed.
        return super(TransferFunction, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize from a (num, den) pair; LTI conversion happened in __new__."""
        if isinstance(system[0], LinearTimeInvariant):
            return
        # The system arguments are consumed here; parents only need kwargs.
        super().__init__(**kwargs)
        self._num = None
        self._den = None
        self.num, self.den = normalize(*system)

    def __repr__(self):
        """Return representation of the system's transfer function."""
        return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
            self.__class__.__name__,
            repr(self.num),
            repr(self.den),
            repr(self.dt),
        )

    @property
    def num(self):
        """Numerator of the `TransferFunction` system."""
        return self._num

    @num.setter
    def num(self, num):
        self._num = atleast_1d(num)
        # A 2-D numerator describes a MIMO system: one row per output.
        if self._num.ndim > 1:
            self.outputs, self.inputs = self._num.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def den(self):
        """Denominator of the `TransferFunction` system."""
        return self._den

    @den.setter
    def den(self, den):
        self._den = atleast_1d(den)

    def _copy(self, system):
        """
        Copy the num/den parameters of another `TransferFunction` object.

        Parameters
        ----------
        system : `TransferFunction`
            The system whose parameters are copied into ``self``.
        """
        self.num = system.num
        self.den = system.den

    def to_tf(self):
        """
        Return a copy of the current `TransferFunction` system.

        Returns
        -------
        sys : instance of `TransferFunction`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_zpk(self):
        """
        Convert system representation to `ZerosPolesGain`.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*tf2zpk(self.num, self.den), **self._dt_dict)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*tf2ss(self.num, self.den), **self._dt_dict)

    @staticmethod
    def _z_to_zinv(num, den):
        """Re-express coefficients in `z` as coefficients in `z**-1`.

        Parameters
        ----------
        num, den : 1d array_like
            Coefficient sequences in descending degree of `z`
            (``5z**2 + 3z + 2`` is ``[5, 3, 2]``).

        Returns
        -------
        num, den : 1d array_like
            Coefficient sequences in ascending degree of `z**-1`
            (``5 + 3 z**-1 + 2 z**-2`` is ``[5, 3, 2]``).
        """
        pad = len(num) - len(den)
        # Left-pad the shorter polynomial with zeros so that equal powers
        # of z**-1 line up.
        if pad > 0:
            den = np.hstack((np.zeros(pad), den))
        elif pad < 0:
            num = np.hstack((np.zeros(-pad), num))
        return num, den

    @staticmethod
    def _zinv_to_z(num, den):
        """Re-express coefficients in `z**-1` as coefficients in `z`.

        Inverse of `_z_to_zinv`: the shorter polynomial is zero-padded on
        the right instead of the left.

        Parameters
        ----------
        num, den : 1d array_like
            Coefficient sequences in ascending degree of `z**-1`
            (``5 + 3 z**-1 + 2 z**-2`` is ``[5, 3, 2]``).

        Returns
        -------
        num, den : 1d array_like
            Coefficient sequences in descending degree of `z`
            (``5z**2 + 3z + 2`` is ``[5, 3, 2]``).
        """
        pad = len(num) - len(den)
        if pad > 0:
            den = np.hstack((den, np.zeros(pad)))
        elif pad < 0:
            num = np.hstack((num, np.zeros(-pad)))
        return num, den
class TransferFunctionContinuous(TransferFunction, lti):
    r"""
    Continuous-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
    :math:`b` are the numerator coefficients `num`, :math:`a` the
    denominator coefficients `den`, ``N == len(b) - 1`` and
    ``M == len(a) - 1``. Continuous-time `TransferFunction` systems
    inherit additional functionality from the `lti` class.

    Parameters
    ----------
    *system : arguments
        * 1 argument: an `lti` system to convert
          (`StateSpace`, `TransferFunction` or `ZerosPolesGain`)
        * 2 arguments: (numerator, denominator) array_likes, with
          coefficients in descending exponent order
          (e.g. ``s^2 + 3s + 5`` is ``[1, 3, 5]``)

    See Also
    --------
    ZerosPolesGain, StateSpace, lti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Reading properties that are not part of the transfer-function
    representation (such as the `A`, `B`, `C`, `D` state-space matrices)
    converts the system on every access; convert once with
    ``sys = sys.to_ss()`` first.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.TransferFunction([1, 3, 3], [1, 2, 1])
    TransferFunctionContinuous(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: None
    )
    """

    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Return the discretized `TransferFunction` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys : instance of `dlti` and `TransferFunction`
        """
        # cont2discrete returns (num, den, dt); the trailing dt is dropped
        # because it is passed to the new system explicitly.
        discretized = cont2discrete((self.num, self.den), dt,
                                    method=method, alpha=alpha)
        return TransferFunction(*discretized[:-1], dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
    r"""
    Discrete-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are the numerator coefficients `num`, :math:`a` the
    denominator coefficients `den`, ``N == len(b) - 1`` and
    ``M == len(a) - 1``. Discrete-time `TransferFunction` systems inherit
    additional functionality from the `dlti` class.

    Parameters
    ----------
    *system : arguments
        * 1 argument: a `dlti` system to convert
          (`StateSpace`, `TransferFunction` or `ZerosPolesGain`)
        * 2 arguments: (numerator, denominator) array_likes, with
          coefficients in descending exponent order
          (e.g. ``z^2 + 3z + 5`` is ``[1, 3, 5]``)
    dt : float, optional
        Sampling time [s] of the discrete-time systems. Defaults to
        `True` (unspecified sampling time). Keyword-only, e.g. ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Reading properties that are not part of the transfer-function
    representation (such as the `A`, `B`, `C`, `D` state-space matrices)
    converts the system on every access and may lose precision.

    Examples
    --------
    >>> from scipy import signal
    >>> signal.TransferFunction([1, 3, 3], [1, 2, 1], 0.5)
    TransferFunctionDiscrete(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: 0.5
    )
    """
    # All behavior is provided by TransferFunction and dlti; this class
    # only fixes the discrete-time variant of the pair.
    pass
class ZerosPolesGain(LinearTimeInvariant):
    r"""
    Linear Time Invariant system class in zeros, poles, gain form.
    Represents the system as the continuous- or discrete-time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    `ZerosPolesGain` systems inherit additional functionality from the `lti`,
    respectively the `dlti` classes, depending on which system representation
    is used.
    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, StateSpace, lti, dlti
    zpk2ss, zpk2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.
    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
    >>> from scipy import signal
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )
    Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
    of 0.1 seconds:
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )
    """
    def __new__(cls, *system, **kwargs):
        """Handle object conversion if input is an instance of `lti`."""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            # A single lti/dlti argument means "convert that system".
            return system[0].to_zpk()
        # Choose whether to inherit from `lti` or from `dlti`: `dt=None`
        # (or absent) means continuous-time, anything else discrete-time.
        if cls is ZerosPolesGain:
            if kwargs.get('dt') is None:
                return ZerosPolesGainContinuous.__new__(
                    ZerosPolesGainContinuous,
                    *system,
                    **kwargs)
            else:
                return ZerosPolesGainDiscrete.__new__(
                    ZerosPolesGainDiscrete,
                    *system,
                    **kwargs
                    )
        # No special conversion needed.  Use Python-3 style super() for
        # consistency with __init__ below.
        return super().__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the zeros, poles, gain system."""
        # Conversion of lti instances is handled in __new__; in that case
        # __init__ receives the already-converted object and must not
        # re-initialize it.
        if isinstance(system[0], LinearTimeInvariant):
            return
        super().__init__(**kwargs)
        self._zeros = None
        self._poles = None
        self._gain = None
        self.zeros, self.poles, self.gain = system

    def __repr__(self):
        """Return representation of the `ZerosPolesGain` system."""
        return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
            self.__class__.__name__,
            repr(self.zeros),
            repr(self.poles),
            repr(self.gain),
            repr(self.dt),
            )

    @property
    def zeros(self):
        """Zeros of the `ZerosPolesGain` system."""
        return self._zeros

    @zeros.setter
    def zeros(self, zeros):
        self._zeros = atleast_1d(zeros)
        # Update dimensions: a 2-D zeros array encodes a MIMO system with
        # one row of zeros per output; a 1-D array is SISO.
        if len(self.zeros.shape) > 1:
            self.outputs, self.inputs = self.zeros.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def poles(self):
        """Poles of the `ZerosPolesGain` system."""
        return self._poles

    @poles.setter
    def poles(self, poles):
        self._poles = atleast_1d(poles)

    @property
    def gain(self):
        """Gain of the `ZerosPolesGain` system."""
        return self._gain

    @gain.setter
    def gain(self, gain):
        self._gain = gain

    def _copy(self, system):
        """
        Copy the parameters of another `ZerosPolesGain` system.
        Parameters
        ----------
        system : instance of `ZerosPolesGain`
            The zeros, poles gain system that is to be copied
        """
        self.poles = system.poles
        self.zeros = system.zeros
        self.gain = system.gain

    def to_tf(self):
        """
        Convert system representation to `TransferFunction`.
        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
                                **self._dt_dict)

    def to_zpk(self):
        """
        Return a copy of the current 'ZerosPolesGain' system.
        Returns
        -------
        sys : instance of `ZerosPolesGain`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.
        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
                          **self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
    r"""
    Continuous-time Linear Time Invariant system in zeros, poles, gain form.
    Represents the system as the continuous time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    Continuous-time `ZerosPolesGain` systems inherit additional functionality
    from the `lti` class.
    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    See Also
    --------
    TransferFunction, StateSpace, lti
    zpk2ss, zpk2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.
    Examples
    --------
    Construct the transfer function
    :math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
    >>> from scipy import signal
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )
    """
    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `ZerosPolesGain` system.
        Parameters: See `cont2discrete` for details.
        Returns
        -------
        sys: instance of `dlti` and `ZerosPolesGain`
        """
        # cont2discrete returns the discretized (zeros, poles, gain) plus a
        # trailing dt element; drop the trailing dt since we pass it as a
        # keyword to the constructor instead.
        discretized = cont2discrete((self.zeros, self.poles, self.gain),
                                    dt, method=method, alpha=alpha)
        return ZerosPolesGain(*discretized[:-1], dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
    r"""
    Discrete-time Linear Time Invariant system in zeros, poles, gain form.
    Represents the system as the discrete-time transfer function
    :math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    Discrete-time `ZerosPolesGain` systems inherit additional functionality
    from the `dlti` class.
    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:
            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, StateSpace, dlti
    zpk2ss, zpk2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.
    Examples
    --------
    Construct the transfer function
    :math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
    >>> from scipy import signal
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )
    Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
    of 0.1 seconds:
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )
    """
    # All behavior is inherited: construction/dispatch from ZerosPolesGain
    # (__new__/__init__) and discrete-time semantics from dlti.
    pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
    r"""
    Linear Time Invariant system in state-space form.
    Represents the system as the continuous-time, first order differential
    equation :math:`\dot{x} = A x + B u` or the discrete-time difference
    equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
    inherit additional functionality from the `lti`, respectively the `dlti`
    classes, depending on which system representation is used.
    Parameters
    ----------
    *system: arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
        The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, ZerosPolesGain, lti, dlti
    ss2zpk, ss2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[0, 1], [0, 0]])
    >>> b = np.array([[0], [1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> sys = signal.StateSpace(a, b, c, d)
    >>> print(sys)
    StateSpaceContinuous(
    array([[0, 1],
           [0, 0]]),
    array([[0],
           [1]]),
    array([[1, 0]]),
    array([[0]]),
    dt: None
    )
    >>> sys.to_discrete(0.1)
    StateSpaceDiscrete(
    array([[1. , 0.1],
           [0. , 1. ]]),
    array([[0.005],
           [0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    >>> a = np.array([[1, 0.1], [0, 1]])
    >>> b = np.array([[0.005], [0.1]])
    >>> signal.StateSpace(a, b, c, d, dt=0.1)
    StateSpaceDiscrete(
    array([[1. , 0.1],
           [0. , 1. ]]),
    array([[0.005],
           [0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    """
    # Override NumPy binary operations and ufuncs so that e.g.
    # ``ndarray * StateSpace`` dispatches to our __rmul__ instead of being
    # broadcast element-wise by NumPy.
    __array_priority__ = 100.0
    __array_ufunc__ = None

    def __new__(cls, *system, **kwargs):
        """Create new StateSpace object and settle inheritance."""
        # Handle object conversion if input is an instance of `lti`
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_ss()
        # Choose whether to inherit from `lti` or from `dlti`: `dt=None`
        # (or absent) means continuous-time, anything else discrete-time.
        if cls is StateSpace:
            if kwargs.get('dt') is None:
                return StateSpaceContinuous.__new__(StateSpaceContinuous,
                                                    *system, **kwargs)
            else:
                return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
                                                  *system, **kwargs)
        # No special conversion needed
        return super(StateSpace, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the state space lti/dlti system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], LinearTimeInvariant):
            return
        # Remove system arguments, not needed by parents anymore
        super().__init__(**kwargs)
        self._A = None
        self._B = None
        self._C = None
        self._D = None
        self.A, self.B, self.C, self.D = abcd_normalize(*system)

    def __repr__(self):
        """Return representation of the `StateSpace` system."""
        return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
            self.__class__.__name__,
            repr(self.A),
            repr(self.B),
            repr(self.C),
            repr(self.D),
            repr(self.dt),
            )

    def _check_binop_other(self, other):
        # Binary operators accept another StateSpace or anything that acts
        # like a scalar/matrix.
        return isinstance(other, (StateSpace, np.ndarray, float, complex,
                                  np.number, int))

    def __mul__(self, other):
        """
        Post-multiply another system or a scalar
        Handles multiplication of systems in the sense of a frequency domain
        multiplication. That means, given two systems E1(s) and E2(s), their
        multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s)
        is equivalent to first applying E2(s), and then E1(s).
        Notes
        -----
        For SISO systems the order of system application does not matter.
        However, for MIMO systems, where the two systems are matrices, the
        order above ensures standard Matrix multiplication rules apply.
        """
        if not self._check_binop_other(other):
            return NotImplemented
        if isinstance(other, StateSpace):
            # Disallow mix of discrete and continuous systems.
            if type(other) is not type(self):
                return NotImplemented
            if self.dt != other.dt:
                raise TypeError('Cannot multiply systems with different `dt`.')
            n1 = self.A.shape[0]
            n2 = other.A.shape[0]
            # Interconnection of systems
            # x1' = A1 x1 + B1 u1
            # y1  = C1 x1 + D1 u1
            # x2' = A2 x2 + B2 y1
            # y2  = C2 x2 + D2 y1
            #
            # Plugging in with u1 = y2 yields
            # [x1']   [A1 B1*C2 ] [x1]   [B1*D2]
            # [x2'] = [0  A2    ] [x2] + [B2   ] u2
            #                    [x1]
            #  y2  = [C1 D1*C2] [x2] + D1*D2 u2
            a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))),
                           np.hstack((zeros((n2, n1)), other.A))))
            b = np.vstack((np.dot(self.B, other.D), other.B))
            c = np.hstack((self.C, np.dot(self.D, other.C)))
            d = np.dot(self.D, other.D)
        else:
            # Assume that other is a scalar / matrix
            # For post multiplication the input gets scaled
            a = self.A
            b = np.dot(self.B, other)
            c = self.C
            d = np.dot(self.D, other)
        # np.find_common_type was deprecated in NumPy 1.25 and removed in
        # NumPy 2.0; np.result_type is the supported equivalent here.
        common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)
        return StateSpace(np.asarray(a, dtype=common_dtype),
                          np.asarray(b, dtype=common_dtype),
                          np.asarray(c, dtype=common_dtype),
                          np.asarray(d, dtype=common_dtype),
                          **self._dt_dict)

    def __rmul__(self, other):
        """Pre-multiply a scalar or matrix (but not StateSpace)"""
        if not self._check_binop_other(other) or isinstance(other, StateSpace):
            return NotImplemented
        # For pre-multiplication only the output gets scaled
        a = self.A
        b = self.B
        c = np.dot(other, self.C)
        d = np.dot(other, self.D)
        # See __mul__: np.result_type replaces the removed np.find_common_type.
        common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)
        return StateSpace(np.asarray(a, dtype=common_dtype),
                          np.asarray(b, dtype=common_dtype),
                          np.asarray(c, dtype=common_dtype),
                          np.asarray(d, dtype=common_dtype),
                          **self._dt_dict)

    def __neg__(self):
        """Negate the system (equivalent to pre-multiplying by -1)."""
        return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict)

    def __add__(self, other):
        """
        Adds two systems in the sense of frequency domain addition.
        """
        if not self._check_binop_other(other):
            return NotImplemented
        if isinstance(other, StateSpace):
            # Disallow mix of discrete and continuous systems.
            if type(other) is not type(self):
                raise TypeError('Cannot add {} and {}'.format(type(self),
                                                              type(other)))
            if self.dt != other.dt:
                raise TypeError('Cannot add systems with different `dt`.')
            # Interconnection of systems
            # x1' = A1 x1 + B1 u
            # y1  = C1 x1 + D1 u
            # x2' = A2 x2 + B2 u
            # y2  = C2 x2 + D2 u
            # y   = y1 + y2
            #
            # Plugging in yields
            # [x1']   [A1 0 ] [x1]   [B1]
            # [x2'] = [0  A2] [x2] + [B2] u
            #                 [x1]
            #  y   = [C1 C2] [x2] + [D1 + D2] u
            a = linalg.block_diag(self.A, other.A)
            b = np.vstack((self.B, other.B))
            c = np.hstack((self.C, other.C))
            d = self.D + other.D
        else:
            other = np.atleast_2d(other)
            if self.D.shape == other.shape:
                # A scalar/matrix is really just a static system (A=0, B=0, C=0)
                a = self.A
                b = self.B
                c = self.C
                d = self.D + other
            else:
                raise ValueError("Cannot add systems with incompatible "
                                 "dimensions ({} and {})"
                                 .format(self.D.shape, other.shape))
        # See __mul__: np.result_type replaces the removed np.find_common_type.
        common_dtype = np.result_type(a.dtype, b.dtype, c.dtype, d.dtype)
        return StateSpace(np.asarray(a, dtype=common_dtype),
                          np.asarray(b, dtype=common_dtype),
                          np.asarray(c, dtype=common_dtype),
                          np.asarray(d, dtype=common_dtype),
                          **self._dt_dict)

    def __sub__(self, other):
        if not self._check_binop_other(other):
            return NotImplemented
        return self.__add__(-other)

    def __radd__(self, other):
        if not self._check_binop_other(other):
            return NotImplemented
        return self.__add__(other)

    def __rsub__(self, other):
        if not self._check_binop_other(other):
            return NotImplemented
        return (-self).__add__(other)

    def __truediv__(self, other):
        """
        Divide by a scalar
        """
        # Division by non-StateSpace scalars
        if not self._check_binop_other(other) or isinstance(other, StateSpace):
            return NotImplemented
        if isinstance(other, np.ndarray) and other.ndim > 0:
            # It's ambiguous what this means, so disallow it
            raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays")
        return self.__mul__(1/other)

    @property
    def A(self):
        """State matrix of the `StateSpace` system."""
        return self._A

    @A.setter
    def A(self, A):
        self._A = _atleast_2d_or_none(A)

    @property
    def B(self):
        """Input matrix of the `StateSpace` system."""
        return self._B

    @B.setter
    def B(self, B):
        self._B = _atleast_2d_or_none(B)
        self.inputs = self.B.shape[-1]

    @property
    def C(self):
        """Output matrix of the `StateSpace` system."""
        return self._C

    @C.setter
    def C(self, C):
        self._C = _atleast_2d_or_none(C)
        self.outputs = self.C.shape[0]

    @property
    def D(self):
        """Feedthrough matrix of the `StateSpace` system."""
        return self._D

    @D.setter
    def D(self, D):
        self._D = _atleast_2d_or_none(D)

    def _copy(self, system):
        """
        Copy the parameters of another `StateSpace` system.
        Parameters
        ----------
        system : instance of `StateSpace`
            The state-space system that is to be copied
        """
        self.A = system.A
        self.B = system.B
        self.C = system.C
        self.D = system.D

    def to_tf(self, **kwargs):
        """
        Convert system representation to `TransferFunction`.
        Parameters
        ----------
        kwargs : dict, optional
            Additional keywords passed to `ss2zpk`
        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
                                       **kwargs), **self._dt_dict)

    def to_zpk(self, **kwargs):
        """
        Convert system representation to `ZerosPolesGain`.
        Parameters
        ----------
        kwargs : dict, optional
            Additional keywords passed to `ss2zpk`
        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
                                      **kwargs), **self._dt_dict)

    def to_ss(self):
        """
        Return a copy of the current `StateSpace` system.
        Returns
        -------
        sys : instance of `StateSpace`
            The current system (copy)
        """
        return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
    r"""
    Continuous-time Linear Time Invariant system in state-space form.
    Represents the system as the continuous-time, first order differential
    equation :math:`\dot{x} = A x + B u`.
    Continuous-time `StateSpace` systems inherit additional functionality
    from the `lti` class.
    Parameters
    ----------
    *system: arguments
        The `StateSpace` class can be instantiated with 1 or 3 arguments.
        The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    See Also
    --------
    TransferFunction, ZerosPolesGain, lti
    ss2zpk, ss2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[0, 1], [0, 0]])
    >>> b = np.array([[0], [1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> sys = signal.StateSpace(a, b, c, d)
    >>> print(sys)
    StateSpaceContinuous(
    array([[0, 1],
           [0, 0]]),
    array([[0],
           [1]]),
    array([[1, 0]]),
    array([[0]]),
    dt: None
    )
    """
    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `StateSpace` system.
        Parameters: See `cont2discrete` for details.
        Returns
        -------
        sys: instance of `dlti` and `StateSpace`
        """
        # cont2discrete yields the discretized (A, B, C, D) followed by a
        # trailing dt entry; strip the dt and supply it as a keyword so the
        # StateSpace dispatcher builds a discrete-time system.
        discretized = cont2discrete((self.A, self.B, self.C, self.D),
                                    dt, method=method, alpha=alpha)
        return StateSpace(*discretized[:-1], dt=dt)
class StateSpaceDiscrete(StateSpace, dlti):
    r"""
    Discrete-time Linear Time Invariant system in state-space form.
    Represents the system as the discrete-time difference equation
    :math:`x[k+1] = A x[k] + B u[k]`.
    `StateSpace` systems inherit additional functionality from the `dlti`
    class.
    Parameters
    ----------
    *system: arguments
        The `StateSpace` class can be instantiated with 1 or 3 arguments.
        The following gives the number of input arguments and their
        interpretation:
            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, ZerosPolesGain, dlti
    ss2zpk, ss2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[1, 0.1], [0, 1]])
    >>> b = np.array([[0.005], [0.1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> signal.StateSpace(a, b, c, d, dt=0.1)
    StateSpaceDiscrete(
    array([[ 1. ,  0.1],
           [ 0. ,  1. ]]),
    array([[ 0.005],
           [ 0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    """
    # All behavior is inherited: construction/dispatch from StateSpace
    # (__new__/__init__) and discrete-time semantics from dlti.
    pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
    """
    Simulate output of a continuous-time linear system, by using
    the ODE solver `scipy.integrate.odeint`.
    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)
    U : array_like (1D or 2D), optional
        An input array describing the input at each time T.  Linear
        interpolation is used between given times.  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.  If U is not given, the input is assumed
        to be zero.
    T : array_like (1D or 2D), optional
        The time steps at which the input is defined and at which the
        output is desired.  The default is 101 evenly spaced points on
        the interval [0,10.0].
    X0 : array_like (1D), optional
        The initial condition of the state vector.  If `X0` is not
        given, the initial conditions are assumed to be 0.
    kwargs : dict
        Additional keyword arguments are passed on to the function
        `odeint`.  See the notes below for more details.
    Returns
    -------
    T : 1D ndarray
        The time values for the output.
    yout : ndarray
        The response of the system.
    xout : ndarray
        The time-evolution of the state-vector.
    Notes
    -----
    This function uses `scipy.integrate.odeint` to solve the
    system's differential equations.  Additional keyword arguments
    given to `lsim2` are passed on to `odeint`.  See the documentation
    for `scipy.integrate.odeint` for the full list of arguments.
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    See Also
    --------
    lsim
    Examples
    --------
    We'll use `lsim2` to simulate an analog Bessel filter applied to
    a signal.
    >>> from scipy.signal import bessel, lsim2
    >>> import matplotlib.pyplot as plt
    Create a low-pass Bessel filter with a cutoff of 12 Hz.
    >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
    Generate data to which the filter is applied.
    >>> t = np.linspace(0, 1.25, 500, endpoint=False)
    The input signal is the sum of three sinusoidal curves, with
    frequencies 4 Hz, 40 Hz, and 80 Hz.  The filter should mostly
    eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
    >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
    ...      0.5*np.cos(2*np.pi*80*t))
    Simulate the filter with `lsim2`.
    >>> tout, yout, xout = lsim2((b, a), U=u, T=t)
    Plot the result.
    >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
    >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
    >>> plt.legend(loc='best', shadow=True, framealpha=1)
    >>> plt.grid(alpha=0.3)
    >>> plt.xlabel('t')
    >>> plt.show()
    In a second example, we simulate a double integrator ``y'' = u``, with
    a constant input ``u = 1``.  We'll use the state space representation
    of the integrator.
    >>> from scipy.signal import lti
    >>> A = np.array([[0, 1], [0, 0]])
    >>> B = np.array([[0], [1]])
    >>> C = np.array([[1, 0]])
    >>> D = 0
    >>> system = lti(A, B, C, D)
    `t` and `u` define the time and input signal for the system to
    be simulated.
    >>> t = np.linspace(0, 5, num=50)
    >>> u = np.ones_like(t)
    Compute the simulation, and then plot `y`.  As expected, the plot shows
    the curve ``y = 0.5*t**2``.
    >>> tout, y, x = lsim2(system, u, t)
    >>> plt.plot(t, y)
    >>> plt.grid(alpha=0.3)
    >>> plt.xlabel('t')
    >>> plt.show()
    """
    # Normalize `system` to a state-space model; tuples are interpreted by
    # the lti constructor (see the docstring for the tuple conventions).
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('lsim2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    if X0 is None:
        # Zero initial state, matching the state dimension (rows of B).
        X0 = zeros(sys.B.shape[0], sys.A.dtype)
    if T is None:
        # XXX T should really be a required argument, but U was
        # changed from a required positional argument to a keyword,
        # and T is after U in the argument list.  So we either: change
        # the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
        # here.  This code implements the latter.
        T = linspace(0, 10.0, 101)
    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")
    if U is not None:
        U = atleast_1d(U)
        if len(U.shape) == 1:
            # Single-input case: promote to a column per input.
            U = U.reshape(-1, 1)
        sU = U.shape
        if sU[0] != len(T):
            raise ValueError("U must have the same number of rows "
                             "as elements in T.")
        if sU[1] != sys.inputs:
            raise ValueError("The number of inputs in U (%d) is not "
                             "compatible with the number of system "
                             "inputs (%d)" % (sU[1], sys.inputs))
        # Create a callable that uses linear interpolation to
        # calculate the input at any time.
        ufunc = interpolate.interp1d(T, U, kind='linear',
                                     axis=0, bounds_error=False)

        def fprime(x, t, sys, ufunc):
            """The vector field of the linear system."""
            # nan_to_num guards against interp1d returning NaN outside
            # [T[0], T[-1]] (bounds_error=False above).
            return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
        # Integrate x' = A x + B u(t) over T; y = C x + D u.
        xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
        yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
    else:
        def fprime(x, t, sys):
            """The vector field of the linear system."""
            return dot(sys.A, x)
        # Zero-input (homogeneous) response: x' = A x, y = C x.
        xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
        yout = dot(sys.C, transpose(xout))
    return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float64):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
    """
    Simulate output of a continuous-time linear system.
    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)
    U : array_like
        An input array describing the input at each time `T`
        (interpolation is assumed between given times).  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.  If U = 0 or None, a zero input is used.
    T : array_like
        The time steps at which the input is defined and at which the
        output is desired.  Must be nonnegative, increasing, and equally spaced.
    X0 : array_like, optional
        The initial conditions on the state vector (zero by default).
    interp : bool, optional
        Whether to use linear (True, the default) or zero-order-hold (False)
        interpolation for the input array.
    Returns
    -------
    T : 1D ndarray
        Time values for the output.
    yout : 1D ndarray
        System response.
    xout : ndarray
        Time evolution of the state vector.
    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    Examples
    --------
    We'll use `lsim` to simulate an analog Bessel filter applied to
    a signal.
    >>> from scipy.signal import bessel, lsim
    >>> import matplotlib.pyplot as plt
    Create a low-pass Bessel filter with a cutoff of 12 Hz.
    >>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
    Generate data to which the filter is applied.
    >>> t = np.linspace(0, 1.25, 500, endpoint=False)
    The input signal is the sum of three sinusoidal curves, with
    frequencies 4 Hz, 40 Hz, and 80 Hz.  The filter should mostly
    eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
    >>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
    ...      0.5*np.cos(2*np.pi*80*t))
    Simulate the filter with `lsim`.
    >>> tout, yout, xout = lsim((b, a), U=u, T=t)
    Plot the result.
    >>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
    >>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
    >>> plt.legend(loc='best', shadow=True, framealpha=1)
    >>> plt.grid(alpha=0.3)
    >>> plt.xlabel('t')
    >>> plt.show()
    In a second example, we simulate a double integrator ``y'' = u``, with
    a constant input ``u = 1``.  We'll use the state space representation
    of the integrator.
    >>> from scipy.signal import lti
    >>> A = np.array([[0.0, 1.0], [0.0, 0.0]])
    >>> B = np.array([[0.0], [1.0]])
    >>> C = np.array([[1.0, 0.0]])
    >>> D = 0.0
    >>> system = lti(A, B, C, D)
    `t` and `u` define the time and input signal for the system to
    be simulated.
    >>> t = np.linspace(0, 5, num=50)
    >>> u = np.ones_like(t)
    Compute the simulation, and then plot `y`.  As expected, the plot shows
    the curve ``y = 0.5*t**2``.
    >>> tout, y, x = lsim(system, u, t)
    >>> plt.plot(t, y)
    >>> plt.grid(alpha=0.3)
    >>> plt.xlabel('t')
    >>> plt.show()
    """
    # Normalize `system` to a state-space model; tuples are interpreted by
    # the lti constructor (see the docstring for the tuple conventions).
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('lsim can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")
    A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
    n_states = A.shape[0]
    n_inputs = B.shape[1]
    n_steps = T.size
    if X0 is None:
        X0 = zeros(n_states, sys.A.dtype)
    # xout[i] is the state (as a row vector) at time T[i].
    xout = np.empty((n_steps, n_states), sys.A.dtype)
    if T[0] == 0:
        xout[0] = X0
    elif T[0] > 0:
        # step forward to initial time, with zero input
        xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
    else:
        raise ValueError("Initial time must be nonnegative")
    # A scalar 0, None, or an all-zero array all count as "no input".
    no_input = (U is None or
                (isinstance(U, (int, float)) and U == 0.) or
                not np.any(U))
    if n_steps == 1:
        # Degenerate single-sample case: output is just C x (+ D u).
        yout = squeeze(dot(xout, transpose(C)))
        if not no_input:
            yout += squeeze(dot(U, transpose(D)))
        return T, squeeze(yout), squeeze(xout)
    dt = T[1] - T[0]
    if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
        # Uneven spacing breaks the single-exponential discretization below;
        # fall back to the ODE-solver implementation.
        warnings.warn("Non-uniform timesteps are deprecated. Results may be "
                      "slow and/or inaccurate.", DeprecationWarning)
        return lsim2(system, U, T, X0)
    if no_input:
        # Zero input: just use matrix exponential
        # take transpose because state is a row vector
        expAT_dt = linalg.expm(transpose(A) * dt)
        for i in range(1, n_steps):
            xout[i] = dot(xout[i-1], expAT_dt)
        yout = squeeze(dot(xout, transpose(C)))
        return T, squeeze(yout), squeeze(xout)
    # Nonzero input
    U = atleast_1d(U)
    if U.ndim == 1:
        U = U[:, np.newaxis]
    if U.shape[0] != n_steps:
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")
    if U.shape[1] != n_inputs:
        raise ValueError("System does not define that many inputs.")
    if not interp:
        # Zero-order hold
        # Algorithm: to integrate from time 0 to time dt, we solve
        #   xdot = A x + B u,  x(0) = x0
        #   udot = 0,          u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt ] [ x0 ]
        #   [ u(dt) ] = exp [  0     0   ] [ u0 ]
        M = np.vstack([np.hstack([A * dt, B * dt]),
                       np.zeros((n_inputs, n_states + n_inputs))])
        # transpose everything because the state and input are row vectors
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd = expMT[n_states:, :n_states]
        for i in range(1, n_steps):
            xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
    else:
        # Linear interpolation between steps
        # Algorithm: to integrate from time 0 to time dt, with linear
        # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
        #   xdot = A x + B u,        x(0) = x0
        #   udot = (u1 - u0) / dt,   u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
        #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
        #   [u1 - u0]       [  0     0    0 ] [u1 - u0]
        M = np.vstack([np.hstack([A * dt, B * dt,
                                  np.zeros((n_states, n_inputs))]),
                       np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
                                  np.identity(n_inputs)]),
                       np.zeros((n_inputs, n_states + 2 * n_inputs))])
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd1 = expMT[n_states+n_inputs:, :n_states]
        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        for i in range(1, n_steps):
            xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
    yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
    return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    T : array_like, optional
        Time points.  Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system (except
        for singularities at zero).

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    Examples
    --------
    Compute the impulse response of a second order system with a repeated
    root: ``x''(t) + 2*x'(t) + x(t) = u(t)``

    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    if isinstance(system, dlti):
        raise AttributeError('impulse can only be used with continuous-time '
                             'systems.')
    sys = system._as_ss() if isinstance(system, lti) else lti(*system)._as_ss()
    # An impulse input is equivalent to an initial state equal to B (plus
    # any user-supplied initial condition) with zero input afterwards.
    X = squeeze(sys.B) if X0 is None else squeeze(sys.B + X0)
    n_points = 100 if N is None else N
    T = _default_response_times(sys.A, n_points) if T is None else asarray(T)
    _, yout, _ = lsim(sys, 0., T, X, interp=False)
    return T, yout
def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    The solution is generated by calling `scipy.signal.lsim2`, which in
    turn uses the differential equation solver `scipy.integrate.odeint`.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)
    X0 : 1-D array_like, optional
        The initial condition of the state vector.  Default: 0 (the
        zero vector).
    T : 1-D array_like, optional
        The time steps at which the input is defined and at which the
        output is desired.  Generated automatically when not given.
    N : int, optional
        Number of time points to compute.  Default: 100.
    kwargs : various types
        Additional keyword arguments, passed on to `scipy.signal.lsim2`
        and from there to `scipy.integrate.odeint`; see the latter's
        documentation for details.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, scipy.integrate.odeint

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0

    Examples
    --------
    Compute the impulse response of a second order system with a repeated
    root: ``x''(t) + 2*x'(t) + x(t) = u(t)``

    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse2(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    if isinstance(system, dlti):
        raise AttributeError('impulse2 can only be used with continuous-time '
                             'systems.')
    sys = system._as_ss() if isinstance(system, lti) else lti(*system)._as_ss()
    B = sys.B
    if B.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    B = B.squeeze()
    initial_state = zeros_like(B) if X0 is None else X0
    n_points = 100 if N is None else N
    if T is None:
        T = _default_response_times(sys.A, n_points)
    # Fold the impulse into the initial conditions, then the response is
    # the unforced solution computed by lsim2().
    tout, yout, _ = lsim2(sys, T=T, X0=B + initial_state, **kwargs)
    return tout, yout
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> lti = signal.lti([1.0], [1.0, 1.0])
    >>> t, y = signal.step(lti)
    >>> plt.plot(t, y)
    >>> plt.xlabel('Time [s]')
    >>> plt.ylabel('Amplitude')
    >>> plt.title('Step response for 1. Order Lowpass')
    >>> plt.grid()
    """
    if isinstance(system, dlti):
        raise AttributeError('step can only be used with continuous-time '
                             'systems.')
    sys = system._as_ss() if isinstance(system, lti) else lti(*system)._as_ss()
    n_points = 100 if N is None else N
    T = _default_response_times(sys.A, n_points) if T is None else asarray(T)
    # Drive the system with a constant unit input over the whole range;
    # with a constant input interp=False is exact.
    U = ones(T.shape, sys.A.dtype)
    tout, yout, _ = lsim(sys, U, T, X0=X0, interp=False)
    return tout, yout
def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    This function is functionally the same as `scipy.signal.step`, but
    it uses the function `scipy.signal.lsim2` to compute the step
    response.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.
    kwargs : various types
        Additional keyword arguments, passed on to `scipy.signal.lsim2`
        and from there to `scipy.integrate.odeint`.  See the
        documentation of `scipy.integrate.odeint` for details.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> lti = signal.lti([1.0], [1.0, 1.0])
    >>> t, y = signal.step2(lti)
    >>> plt.plot(t, y)
    >>> plt.xlabel('Time [s]')
    >>> plt.ylabel('Amplitude')
    >>> plt.title('Step response for 1. Order Lowpass')
    >>> plt.grid()
    """
    if isinstance(system, dlti):
        raise AttributeError('step2 can only be used with continuous-time '
                             'systems.')
    sys = system._as_ss() if isinstance(system, lti) else lti(*system)._as_ss()
    n_points = 100 if N is None else N
    T = _default_response_times(sys.A, n_points) if T is None else asarray(T)
    # Constant unit input over the whole time range.
    U = ones(T.shape, sys.A.dtype)
    tout, yout, _ = lsim2(sys, U, T, X0=X0, **kwargs)
    return tout, yout
def bode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)
    w : array_like, optional
        Array of frequencies (in rad/s).  Magnitude and phase data is
        calculated for every value in this array.  If not given a
        reasonable set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given.  The
        `n` frequencies are logarithmically spaced in an interval chosen
        to include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> sys = signal.TransferFunction([1], [1, 1])
    >>> w, mag, phase = signal.bode(sys)
    >>> plt.figure()
    >>> plt.semilogx(w, mag)    # Bode magnitude plot
    >>> plt.figure()
    >>> plt.semilogx(w, phase)  # Bode phase plot
    >>> plt.show()
    """
    w, resp = freqresp(system, w=w, n=n)
    # Convert the complex frequency response to decibels and to an
    # unwrapped phase in degrees.
    mag = 20.0 * numpy.log10(abs(resp))
    phase_rad = numpy.unwrap(numpy.arctan2(resp.imag, resp.real))
    return w, mag, phase_rad * 180.0 / numpy.pi
def freqresp(system, w=None, n=10000):
    r"""Calculate the frequency response of a continuous-time system.

    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)
    w : array_like, optional
        Array of frequencies (in rad/s).  Magnitude and phase data is
        calculated for every value in this array.  If not given, a
        reasonable set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given.  The
        `n` frequencies are logarithmically spaced in an interval chosen
        to include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    Examples
    --------
    Generating the Nyquist plot of a transfer function

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`:

    >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
    >>> w, H = signal.freqresp(s1)
    >>> plt.figure()
    >>> plt.plot(H.real, H.imag, "b")
    >>> plt.plot(H.real, -H.imag, "r")
    >>> plt.show()
    """
    if isinstance(system, dlti):
        raise AttributeError('freqresp can only be used with continuous-time '
                             'systems.')
    if isinstance(system, lti):
        # TransferFunction and ZerosPolesGain can be evaluated directly;
        # anything else (e.g. StateSpace) is converted to zpk form.
        if isinstance(system, (TransferFunction, ZerosPolesGain)):
            sys = system
        else:
            sys = system._as_zpk()
    else:
        sys = lti(*system)._as_zpk()
    if sys.inputs != 1 or sys.outputs != 1:
        raise ValueError("freqresp() requires a SISO (single input, single "
                         "output) system.")
    worN = n if w is None else w
    if isinstance(sys, TransferFunction):
        # sys.num.ravel() is used because there are cases where sys.num
        # is a 2-D array with a single row.
        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
    else:
        # At this point sys is necessarily a ZerosPolesGain instance.
        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
    return w, h
# This class will be used by place_poles to return its results
# see https://code.activestate.com/recipes/52308/
class Bunch:
    """A simple attribute container.

    Stores the keyword arguments it is constructed with as instance
    attributes, e.g. ``Bunch(a=1).a == 1``.  Used by `place_poles` to
    return its results.
    """

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        # Show the stored attributes (sorted for a stable output) to make
        # interactive inspection of place_poles results easier.
        keys = ', '.join('%s=%r' % kv for kv in sorted(self.__dict__.items()))
        return '%s(%s)' % (type(self).__name__, keys)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
    """
    Validate the arguments of `place_poles`.

    Checks that the poles come in complex conjugate pairs, that the
    shapes of ``A``, ``B`` and ``poles`` are compatible and that the
    chosen method supports the requested poles.  Returns the update loop
    to use and the poles ordered as required by the algorithms.
    """
    poles = np.asarray(poles)
    if poles.ndim > 1:
        raise ValueError("Poles must be a 1D array like.")
    # _order_complex_poles raises ValueError when a complex pole comes
    # without its conjugate.
    poles = _order_complex_poles(poles)
    if A.ndim > 2:
        raise ValueError("A must be a 2D array/matrix.")
    if B.ndim > 2:
        raise ValueError("B must be a 2D array/matrix")
    if A.shape[0] != A.shape[1]:
        raise ValueError("A must be square")
    n_states = A.shape[0]
    if len(poles) > n_states:
        raise ValueError("maximum number of poles is %d but you asked for %d" %
                         (n_states, len(poles)))
    if len(poles) < n_states:
        raise ValueError("number of poles is %d but you should provide %d" %
                         (len(poles), n_states))
    # A pole may not be repeated more often than the rank of B allows.
    rank_B = np.linalg.matrix_rank(B)
    for pole in poles:
        if sum(pole == poles) > rank_B:
            raise ValueError("at least one of the requested pole is repeated "
                             "more than rank(B) times")
    # Choose update method
    if method not in ('KNV0', 'YT'):
        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
    if method == "KNV0":
        update_loop = _KNV0_loop
        if not all(np.isreal(poles)):
            raise ValueError("Complex poles are not supported by KNV0")
    else:
        update_loop = _YT_loop
    if maxiter < 1:
        raise ValueError("maxiter must be at least equal to 1")
    # rtol <= 0 is deliberately not rejected: the user can pass a negative
    # rtol to force maxiter iterations.
    if rtol > 1:
        raise ValueError("rtol can not be greater than 1")
    return update_loop, poles
def _order_complex_poles(poles):
"""
Check we have complex conjugates pairs and reorder P according to YT, ie
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
    """
    Algorithm "KNV0" Kautsky et Al. Robust pole
    assignment in linear state feedback, Int journal of Control
    1985, vol 41 p 1129->1155
    https://la.epfl.ch/files/content/sites/la/files/
    users/105941/public/KautskyNicholsDooren

    Performs one rank-1 update: column ``j`` of ``transfer_matrix`` is
    replaced, in place, by the vector of the subspace ``ker_pole[j]``
    that is closest to being orthogonal to all the other columns.
    ``transfer_matrix`` is modified in place; nothing is returned.
    """
    # Remove xj form the base
    transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
    # If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonnal to
    # Q0, that's what we are looking for !

    # After merge of gh-4249 great speed improvements could be achieved
    # using QR updates instead of full QR in the line below

    # To debug with numpy qr uncomment the line below
    # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
    Q, R = s_qr(transfer_matrix_not_j, mode="full")

    # Project Q's last column (orthogonal to every other transfer-matrix
    # column) onto the subspace spanned by ker_pole[j].
    mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
    yj = np.dot(mat_ker_pj, Q[:, -1])

    # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
    # projection into ker_pole[j] will yield a vector
    # close to 0.  As we are looking for a vector in ker_pole[j]
    # simply stick with transfer_matrix[:, j] (unless someone provides me with
    # a better choice ?)
    if not np.allclose(yj, 0):
        xj = yj/np.linalg.norm(yj)
        transfer_matrix[:, j] = xj

        # KNV does not support complex poles, using YT technique the two lines
        # below seem to work 9 out of 10 times but it is not reliable enough:
        # transfer_matrix[:, j]=real(xj)
        # transfer_matrix[:, j+1]=imag(xj)

        # Add this at the beginning of this function if you wish to test
        # complex support:
        # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
        #    return
        # Problems arise when imag(xj)=>0 I have no idea on how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.1 page 19 related to real pairs

    Rank-2 update: columns ``i`` and ``j`` of ``transfer_matrix`` are
    replaced in place for a pair of real poles; nothing is returned.
    """
    # step 1 page 19
    u = Q[:, -2, np.newaxis]
    v = Q[:, -1, np.newaxis]

    # step 2 page 19
    m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
               np.dot(v, u.T)), ker_pole[j])

    # step 3 page 19
    um, sm, vm = np.linalg.svd(m)
    # mu1, mu2 two first columns of U => 2 first lines of U.T
    mu1, mu2 = um.T[:2, :, np.newaxis]
    # VM is V.T with numpy we want the first two lines of V.T
    nu1, nu2 = vm[:2, :, np.newaxis]

    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)
    transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
            transfer_matrix[:, i, np.newaxis],
            transfer_matrix[:, j, np.newaxis]))

    # The update differs depending on whether the two leading singular
    # values of m are (numerically) distinct or equal.
    if not np.allclose(sm[0], sm[1]):
        ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
        ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
        ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
    else:
        ker_pole_ij = np.vstack((
                                np.hstack((ker_pole[i],
                                           np.zeros(ker_pole[i].shape))),
                                np.hstack((np.zeros(ker_pole[j].shape),
                                           ker_pole[j]))
                                ))
        mu_nu_matrix = np.vstack(
            (np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
            )
        ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
    transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
                                transfer_matrix_j_mo_transfer_matrix_j)
    if not np.allclose(transfer_matrix_ij, 0):
        transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
                              np.linalg.norm(transfer_matrix_ij))
        transfer_matrix[:, i] = transfer_matrix_ij[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = transfer_matrix_ij[
            transfer_matrix[:, i].shape[0]:, 0
            ]
    else:
        # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
        # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to
        # ker_pole_mu_nu and iterate. As we are looking for a vector in
        # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
        # (that's a guess, not a claim !)
        transfer_matrix[:, i] = ker_pole_mu_nu[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = ker_pole_mu_nu[
            transfer_matrix[:, i].shape[0]:, 0
            ]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.2 page 20 related to complex pairs

    Rank-2 update: columns ``i`` and ``j`` of ``transfer_matrix`` hold
    the real and imaginary parts of a complex transfer vector; both are
    replaced in place, nothing is returned.
    """
    # step 1 page 20
    ur = np.sqrt(2)*Q[:, -2, np.newaxis]
    ui = np.sqrt(2)*Q[:, -1, np.newaxis]
    u = ur + 1j*ui

    # step 2 page 20
    ker_pole_ij = ker_pole[i]
    m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
                      np.dot(np.conj(u), u.T)), ker_pole_ij)

    # step 3 page 20
    e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their module
    e_val_idx = np.argsort(np.abs(e_val))
    mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
    mu2 = e_vec[:, e_val_idx[-2], np.newaxis]

    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)

    # remember transfer_matrix_i has been split as
    # transfer_matrix[i]=real(transfer_matrix_i) and
    # transfer_matrix[j]=imag(transfer_matrix_i)
    transfer_matrix_j_mo_transfer_matrix_j = (
        transfer_matrix[:, i, np.newaxis] +
        1j*transfer_matrix[:, j, np.newaxis]
        )

    # The update differs depending on whether the moduli of the two
    # leading eigenvalues are (numerically) distinct or equal.
    if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
                       np.abs(e_val[e_val_idx[-2]])):
        ker_pole_mu = np.dot(ker_pole_ij, mu1)
    else:
        mu1_mu2_matrix = np.hstack((mu1, mu2))
        ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
    transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
                                 transfer_matrix_j_mo_transfer_matrix_j)

    if not np.allclose(transfer_matrix_i_j, 0):
        transfer_matrix_i_j = (transfer_matrix_i_j /
                               np.linalg.norm(transfer_matrix_i_j))
        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
    else:
        # same idea as in YT_real
        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Algorithm "YT" Tits, Yang. Globally Convergent
    Algorithms for Robust Pole Assignment by State Feedback
    https://hdl.handle.net/1903/5598

    The poles P have to be sorted accordingly to section 6.2 page 20.
    ``transfer_matrix`` is updated in place.  Returns a tuple
    ``(stop, cur_rtol, nb_try)``: whether the convergence test passed,
    the relative tolerance achieved and the number of iterations used.
    """
    # The IEEE edition of the YT paper gives useful information on the
    # optimal update order for the real poles in order to minimize the number
    # of times we have to loop over all poles, see page 1442
    nb_real = poles[np.isreal(poles)].shape[0]
    # hnb => Half Nb Real
    hnb = nb_real // 2

    # Stick to the indices in the paper and then remove one to get numpy array
    # index it is a bit easier to link the code to the paper this way even if it
    # is not very clean. The paper is unclear about what should be done when
    # there is only one real pole => use KNV0 on this real pole seem to work
    if nb_real > 0:
        #update the biggest real pole with the smallest one
        update_order = [[nb_real], [1]]
    else:
        update_order = [[],[]]

    # 1-based indices (paper convention) of the first pole of each
    # complex conjugate pair.
    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
    # step 1.a
    r_p = np.arange(1, hnb+nb_real % 2)
    update_order[0].extend(2*r_p)
    update_order[1].extend(2*r_p+1)
    # step 1.b
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 1.c
    r_p = np.arange(1, hnb+1)
    update_order[0].extend(2*r_p-1)
    update_order[1].extend(2*r_p)
    # step 1.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.a
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(1, hnb+1):
            update_order[0].append(i)
            update_order[1].append(i+j)
    # step 2.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.c
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(hnb+1, nb_real+1):
            idx_1 = i+j
            if idx_1 > nb_real:
                idx_1 = i+j-nb_real
            update_order[0].append(i)
            update_order[1].append(idx_1)
    # step 2.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 3.a
    for i in range(1, hnb+1):
        update_order[0].append(i)
        update_order[1].append(i+hnb)
    # step 3.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)

    # Convert the 1-based paper indices into 0-based numpy (i, j) pairs.
    update_order = np.array(update_order).T-1
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for i, j in update_order:
            if i == j:
                # A pair updating a pole with itself falls back to a
                # rank-1 KNV0 update; only the first real pole does this.
                assert i == 0, "i!=0 for KNV call in YT"
                assert np.isreal(poles[i]), "calling KNV on a complex pole"
                _KNV0(B, ker_pole, transfer_matrix, i, poles)
            else:
                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
                                                    axis=1)
                # after merge of gh-4249 great speed improvements could be
                # achieved using QR updates instead of full QR in the line below

                #to debug with numpy qr uncomment the line below
                #Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")

                if np.isreal(poles[i]):
                    assert np.isreal(poles[j]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
                else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)

        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs(
            (det_transfer_matrix -
             det_transfer_matrixb) /
            det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Loop over all poles one by one and apply KNV method 0 algorithm.

    This function only exists because `_YT_loop` needs to call `_KNV0`
    on a single pole; otherwise the loop and the update could have been
    merged into one function.  Updates ``transfer_matrix`` in place and
    returns ``(stop, cur_rtol, nb_try)``.
    """
    converged = False
    n_iter = 0
    # Floor used both to guard the division below and as the minimum
    # acceptable determinant in the convergence test.
    det_floor = np.sqrt(np.spacing(1))
    while n_iter < maxiter and not converged:
        previous_det = np.abs(np.linalg.det(transfer_matrix))
        for pole_idx in range(B.shape[0]):
            _KNV0(B, ker_pole, transfer_matrix, pole_idx, poles)
        current_det = np.max((det_floor,
                              np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs((current_det - previous_det) / current_det)
        if cur_rtol < rtol and current_det > det_floor:
            # Convergence test from YT page 21
            converged = True
        n_iter += 1
    return converged, cur_rtol, n_iter
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
Compute K such that eigenvalues (A - dot(B, K))=poles.
K is the gain matrix such as the plant described by the linear system
``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``,
as close as possible to those asked for in poles.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
The closed loop matrix K such as the eigenvalues of ``A-BK``
are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
poles in increasing order, then the complex congugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
The transfer matrix such as ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
`rtol` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything i.e when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
`nb_iter` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything i.e when ``B.shape[1] == 1``.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
altered in various way to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
https://hdl.handle.net/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
# Move away all the inputs checking, it only adds noise to the code
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
# Step A: QR decomposition of B page 1132 KN
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
# such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
# i.e K=inv(B)*(diag(P)-A)
# if B has as many lines as its rank (but not square) there are many
# solutions and we can choose one using least squares
# => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
# for complex poles we use the following trick
#
# |a -b| has for eigenvalues a+b and a-b
# |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
# e.g solving the first one in R gives the solution
# for the second one in C
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
# step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
# Q1 is orthogonnal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
# We want to select one vector in ker_pole_j to build the transfer
# matrix, however qr returns sometimes vectors with zeros on the
# same line for each pole and this yields very long convergence
# times.
# Or some other times a set of vectors, one with zero imaginary
# part and one (or several) with imaginary parts. After trying
# many ways to select the best possible one (eg ditch vectors
# with zero imaginary part for complex poles) I ended up summing
# all vectors in ker_pole_j, this solves 100% of the problems and
# is a valid choice for transfer_matrix.
# This way for complex poles we are sure to have a non zero
# imaginary part that way, and the problem of lines full of zeros
# in transfer_matrix is solved too as when a vector from
# ker_pole_j has a zero the other one(s) when
# ker_pole_j.shape[1]>1) for sure won't have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
# ie transfer_matrix_j/transfer_matrix_j+1 are
# Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
# rel will be an array referencing a column of transfer_matrix
# if we don't copy() it will changer after the next line and
# and the line after will not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError as e:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles") from e
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
# K still contains complex with ~=0j imaginary parts, get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
def dlsim(system, u, t=None, x0=None):
    """
    Simulate the output of a discrete-time linear system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system.  The number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    u : array_like
        Input at each time in `t` (interpolated between the given times).
        For multiple inputs, each column of the rank-2 array is one input.
    t : array_like, optional
        The time steps at which the input is defined.  If given, it must be
        the same length as `u`; its final value determines how many output
        samples are produced.
    x0 : array_like, optional
        Initial state vector (zero by default).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : ndarray
        System response.
    xout : ndarray, optional
        Time-evolution of the state vector; only returned when the input
        system is a `StateSpace` instance.

    See Also
    --------
    lsim, dstep, dimpulse, cont2discrete

    Examples
    --------
    A simple integrator transfer function with a discrete time step of 1.0
    could be implemented as:

    >>> from scipy import signal
    >>> tf = ([1.0,], [1.0, -1.0], 1.0)
    >>> t_in = [0.0, 1.0, 2.0, 3.0]
    >>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
    >>> t_out, y = signal.dlsim(tf, u, t=t_in)
    >>> y.T
    array([[ 0.,  0.,  0.,  1.]])
    """
    # Normalize the description to a discrete-time LTI object; reject
    # continuous-time systems outright.
    if not isinstance(system, dlti):
        if isinstance(system, lti):
            raise AttributeError('dlsim can only be used with discrete-time dlti '
                                 'systems.')
        system = dlti(*system[:-1], dt=system[-1])

    # Only a caller that handed us a StateSpace gets the state trajectory back.
    is_ss_input = isinstance(system, StateSpace)
    system = system._as_ss()

    # Force the input into (n_samples, n_inputs) layout.
    u = np.atleast_1d(u)
    if u.ndim == 1:
        u = u.reshape(-1, 1)

    dt = system.dt
    if t is None:
        out_samples = u.shape[0]
        stoptime = (out_samples - 1) * dt
    else:
        stoptime = t[-1]
        out_samples = int(np.floor(stoptime / dt)) + 1

    # Pre-allocate the output arrays on the system's own time grid.
    tout = np.linspace(0.0, stoptime, num=out_samples)
    xout = np.zeros((out_samples, system.A.shape[0]))
    yout = np.zeros((out_samples, system.C.shape[0]))

    # Initial condition (xout is already zero when x0 is omitted).
    if x0 is not None:
        xout[0, :] = np.asarray(x0)

    # Resample the input onto the time grid when an explicit time vector was
    # supplied; times outside [t[0], t[-1]] raise (bounds_error=True).
    if t is None:
        u_dt = u
    else:
        u_dt_interp = interp1d(t, u.transpose(), copy=False,
                               bounds_error=True)
        u_dt = u_dt_interp(tout).transpose()

    # March the recurrence x[k+1] = A x[k] + B u[k], y[k] = C x[k] + D u[k].
    A, B, C, D = system.A, system.B, system.C, system.D
    for k in range(out_samples - 1):
        yout[k, :] = np.dot(C, xout[k, :]) + np.dot(D, u_dt[k, :])
        xout[k + 1, :] = np.dot(A, xout[k, :]) + np.dot(B, u_dt[k, :])

    # The loop stops one short of the end; emit the final output sample.
    last = out_samples - 1
    yout[last, :] = np.dot(C, xout[last, :]) + np.dot(D, u_dt[last, :])

    if is_ss_input:
        return tout, yout, xout
    return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
    """
    Impulse response of a discrete-time system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system.  The number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state vector.  Defaults to zero.
    t : array_like, optional
        Time points.  Computed if not given.
    n : int, optional
        Number of time points to compute when `t` is not given
        (100 by default).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : tuple of ndarray
        One response array per input channel: entry ``i`` is the output
        produced by a unit impulse on input ``i``.

    See Also
    --------
    impulse, dstep, dlsim, cont2discrete
    """
    # Coerce the description to a discrete-time state-space model.
    if isinstance(system, dlti):
        system = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dimpulse can only be used with discrete-time '
                             'dlti systems.')
    else:
        system = dlti(*system[:-1], dt=system[-1])._as_ss()

    if n is None:
        n = 100

    # Default grid: n equally spaced samples starting at zero.
    if t is None:
        t = np.linspace(0, n * system.dt, n, endpoint=False)
    else:
        t = np.asarray(t)

    # Drive each input channel with a unit impulse at t[0] and collect the
    # resulting output, one array per channel.
    yout = None
    for channel in range(system.inputs):
        u = np.zeros((t.shape[0], system.inputs))
        u[0, channel] = 1.0

        response = dlsim(system, u, t=t, x0=x0)
        tout = response[0]
        yout = (response[1],) if yout is None else yout + (response[1],)

    return tout, yout
def dstep(system, x0=None, t=None, n=None):
    """
    Step response of a discrete-time system.

    Parameters
    ----------
    system : tuple of array_like
        A tuple describing the system.  The number of elements determines
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state vector.  Defaults to zero.
    t : array_like, optional
        Time points.  Computed if not given.
    n : int, optional
        Number of time points to compute when `t` is not given
        (100 by default).

    Returns
    -------
    tout : ndarray
        Output time points, as a 1-D array.
    yout : tuple of ndarray
        One response array per input channel: entry ``i`` is the output
        produced by a unit step on input ``i``.

    See Also
    --------
    step, dimpulse, dlsim, cont2discrete
    """
    # Coerce the description to a discrete-time state-space model.
    if isinstance(system, dlti):
        system = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dstep can only be used with discrete-time dlti '
                             'systems.')
    else:
        system = dlti(*system[:-1], dt=system[-1])._as_ss()

    if n is None:
        n = 100

    # Default grid: n equally spaced samples starting at zero.
    if t is None:
        t = np.linspace(0, n * system.dt, n, endpoint=False)
    else:
        t = np.asarray(t)

    # Drive each input channel with a unit step and collect the resulting
    # output, one array per channel.
    yout = None
    for channel in range(system.inputs):
        u = np.zeros((t.shape[0], system.inputs))
        u[:, channel] = 1.0

        response = dlsim(system, u, t=t, x0=x0)
        tout = response[0]
        yout = (response[1],) if yout is None else yout + (response[1],)

    return tout, yout
def dfreqresp(system, w=None, n=10000, whole=False):
    r"""
    Calculate the frequency response of a discrete-time system.

    Parameters
    ----------
    system : an instance of the `dlti` class or a tuple describing the system.
        The number of elements in the tuple determines the interpretation:

            * 1 (instance of `dlti`)
            * 2 (numerator, denominator, dt)
            * 3 (zeros, poles, gain, dt)
            * 4 (A, B, C, D, dt)

    w : array_like, optional
        Frequencies (in radians/sample) at which the response is evaluated.
        If not given, a reasonable set is computed.
    n : int, optional
        Number of frequency points to compute when `w` is not given; the
        points are chosen to cover the influence of the poles and zeros.
    whole : bool, optional
        By default, computed frequencies run from 0 to the Nyquist
        frequency pi rad/sample (upper half of the unit circle); set
        `whole` to True to cover 0 to 2*pi rad/sample.

    Returns
    -------
    w : 1D ndarray
        Frequency array [radians/sample]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal
    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
    >>> w, H = signal.dfreqresp(sys)
    """
    # Normalize the description to a dlti object; continuous systems are
    # rejected.
    if not isinstance(system, dlti):
        if isinstance(system, lti):
            raise AttributeError('dfreqresp can only be used with '
                                 'discrete-time systems.')
        system = dlti(*system[:-1], dt=system[-1])

    if isinstance(system, StateSpace):
        # No SS->ZPK conversion exists right now, so go through SS->TF->ZPK.
        system = system._as_tf()

    if not isinstance(system, (TransferFunction, ZerosPolesGain)):
        raise ValueError('Unknown system type')

    if system.inputs != 1 or system.outputs != 1:
        raise ValueError("dfreqresp requires a SISO (single input, single "
                         "output) system.")

    worN = n if w is None else w

    if isinstance(system, TransferFunction):
        # freqz expects polynomials in z^-1, not in z; convert first.
        num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
        w, h = freqz(num, den, worN=worN, whole=whole)
    else:
        # The preceding type check guarantees this is a ZerosPolesGain.
        w, h = freqz_zpk(system.zeros, system.poles, system.gain,
                         worN=worN, whole=whole)

    return w, h
def dbode(system, w=None, n=100):
    r"""
    Calculate Bode magnitude and phase data of a discrete-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The number of elements in the tuple determines the interpretation:

            * 1 (instance of `dlti`)
            * 2 (num, den, dt)
            * 3 (zeros, poles, gain, dt)
            * 4 (A, B, C, D, dt)

    w : array_like, optional
        Frequencies (in radians/sample) at which magnitude and phase are
        computed.  If not given, a reasonable set is calculated.
    n : int, optional
        Number of frequency points to compute when `w` is not given; the
        points are chosen to cover the influence of the poles and zeros.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/time_unit]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal
    >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
    >>> w, mag, phase = signal.dbode(sys)
    """
    w, y = dfreqresp(system, w=w, n=n)

    # The sampling period lives on the dlti object, or is the trailing
    # element of a tuple description.
    dt = system.dt if isinstance(system, dlti) else system[-1]

    mag = 20.0 * numpy.log10(abs(y))
    phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))

    # Convert the frequency axis from rad/sample to rad/time-unit.
    return w / dt, mag, phase
| bsd-3-clause |
sgenoud/scikit-learn | sklearn/metrics/__init__.py | 1 | 1038 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .metrics import confusion_matrix, roc_curve, auc, precision_score, \
recall_score, fbeta_score, f1_score, zero_one_score, \
precision_recall_fscore_support, classification_report, \
precision_recall_curve, explained_variance_score, r2_score, \
zero_one, mean_square_error, hinge_loss, matthews_corrcoef, \
mean_squared_error
from . import cluster
from .cluster import adjusted_rand_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import completeness_score
from .cluster import v_measure_score
from .cluster import silhouette_score
from .cluster import mutual_info_score
from .cluster import adjusted_mutual_info_score
from .cluster import normalized_mutual_info_score
from .pairwise import euclidean_distances, pairwise_distances, pairwise_kernels
| bsd-3-clause |
jereze/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
# Fixed seed so the example is reproducible.
rng = np.random.RandomState(42)
# Two heavy-tailed (strongly non-Gaussian) sources: Student t with 1.5 dof.
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.

# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations

pca = PCA()
S_pca_ = pca.fit(X).transform(X)

ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources

# Normalize the estimated sources to unit variance for plotting.
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
    """Scatter the 2-D point cloud *S* and optionally overlay axis vectors.

    Each entry of *axis_list* is a 2x2 array whose two rows hold the x- and
    y-components of a pair of direction vectors; the arrays are normalized
    in place before drawing.
    """
    plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
                color='steelblue', alpha=0.5)
    if axis_list is not None:
        for color, axis in zip(('orange', 'red'), axis_list):
            axis /= axis.std()
            x_axis, y_axis = axis
            # quiver arrows do not produce a legend entry, so draw a short
            # line in the same color first (trick to get the legend to work)
            plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
            plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
                       color=color)

    plt.hlines(0, -3, 3)
    plt.vlines(0, -3, 3)
    plt.axis([-3, 3, -3, 3])
    plt.xlabel('x')
    plt.ylabel('y')
# Lay out a 2x2 grid: true sources, observations, PCA and ICA recoveries.
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')

# Direction vectors overlaid on the observation plot.
# NOTE: plot_samples normalizes these arrays in place (axis /= axis.std()).
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
# Keep the legend above the scatter/quiver artists (zorder 10/11).
legend.set_zorder(100)

plt.title('Observations')

plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')

plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')

plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/neighbors/tests/test_kde.py | 80 | 5560 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force reference KDE evaluation used to validate KernelDensity.

    Evaluates the density of samples X at the query points Y for the given
    kernel name and bandwidth h.
    """
    # Pairwise Euclidean distances between every query point and sample.
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]

    # Unnormalized kernel weights for each (query, sample) pair.
    if kernel == 'gaussian':
        weights = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        weights = (d < h)
    elif kernel == 'epanechnikov':
        weights = (1.0 - (d * d) / (h * h)) * (d < h)
    elif kernel == 'exponential':
        weights = np.exp(-d / h)
    elif kernel == 'linear':
        weights = (1 - d / h) * (d < h)
    elif kernel == 'cosine':
        weights = np.cos(0.5 * np.pi * d / h) * (d < h)
    else:
        raise ValueError('kernel not recognized')

    # Sum over samples and apply the kernel normalization.
    return norm * weights.sum(-1)
def test_kernel_density(n_samples=100, n_features=3):
    """Compare KernelDensity against the brute-force reference implementation.

    NOTE: nose-style generator test; each yielded tuple runs as one case.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

            def check_results(kernel, bandwidth, atol, rtol):
                kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                                    atol=atol, rtol=rtol)
                log_dens = kde.fit(X).score_samples(Y)
                assert_allclose(np.exp(log_dens), dens_true,
                                atol=atol, rtol=max(1E-7, rtol))
                assert_allclose(np.exp(kde.score(Y)),
                                np.prod(dens_true),
                                atol=atol, rtol=max(1E-7, rtol))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    # NOTE(review): breadth_first is iterated but never
                    # forwarded to check_results, so it only duplicates each
                    # case -- presumably a leftover parameter; confirm.
                    for breadth_first in (True, False):
                        yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    """Check KernelDensity.sample: output shape, sample support, and the
    NotImplementedError raised by kernels without a sampler.
    """
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)

    bandwidth = 0.2

    for kernel in ['gaussian', 'tophat']:
        # draw a sample and check it matches the training-data shape
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        samp = kde.sample(100)
        assert_equal(X.shape, samp.shape)

        # check that samples are in the right range: measure each drawn
        # sample's distance to its nearest training point.
        # (BUGFIX: querying with X itself made every distance 0 and the
        # assertions below vacuous; query the drawn samples instead.)
        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nbrs.kneighbors(samp, return_distance=True)

        if kernel == 'tophat':
            # tophat samples must lie within one bandwidth of a training point
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)

    # check unsupported kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, kde.sample, 100)

    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
    """Smoke-test KernelDensity over algorithm/metric combinations."""
    # Smoke test for various metrics and algorithms
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)  # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in ['euclidean', 'minkowski', 'manhattan',
                       'chebyshev', 'haversine']:
            if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
                # kd_tree must reject metrics it does not implement
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                kde = KernelDensity(algorithm=algorithm, metric=metric)
                kde.fit(X)
                y_dens = kde.score_samples(Y)
                # one density value per query point
                assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
    """Placeholder for a KernelDensity.score test (never implemented)."""
    # TODO: implement a real score test; the sketch below was never enabled.
    pass
    #FIXME
    #np.random.seed(0)
    #X = np.random.random((n_samples, n_features))
    #Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
    """KernelDensity must reject invalid constructor arguments."""
    # Each kwargs dict is one invalid construction that must raise ValueError.
    bad_kwargs = [
        dict(algorithm='blah'),
        dict(bandwidth=0),
        dict(kernel='blah'),
        dict(metric='blah'),
        dict(algorithm='kd_tree', metric='blah'),
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)
def test_kde_pipeline_gridsearch():
    """KernelDensity works inside a Pipeline under GridSearchCV."""
    # test that kde plays nice in pipelines and grid-searches
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    # the scaler is a no-op (with_mean/with_std False); it only exercises
    # the pipeline plumbing in front of the KDE step
    pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
                          KernelDensity(kernel="gaussian"))
    params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
    search = GridSearchCV(pipe1, param_grid=params, cv=5)
    search.fit(X)
    # cluster_std=.1 makes bandwidth 0.1 the expected grid-search winner
    assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
aabadie/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 13 | 26241 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
    """Exercise pairwise_distances across metrics, input types and errors."""
    # Test the pairwise_distance helper function.
    rng = np.random.RandomState(0)

    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    S = pairwise_distances(X, metric="euclidean")
    S2 = euclidean_distances(X)
    assert_array_almost_equal(S, S2)

    # Euclidean distance, with Y != X.
    Y = rng.random_sample((2, 4))
    S = pairwise_distances(X, Y, metric="euclidean")
    S2 = euclidean_distances(X, Y)
    assert_array_almost_equal(S, S2)

    # Test with tuples as X and Y
    X_tuples = tuple([tuple([v for v in row]) for row in X])
    Y_tuples = tuple([tuple([v for v in row]) for row in Y])
    S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
    assert_array_almost_equal(S, S2)

    # "cityblock" uses scikit-learn metric, cityblock (function) is
    # scipy.spatial.
    S = pairwise_distances(X, metric="cityblock")
    S2 = pairwise_distances(X, metric=cityblock)
    assert_equal(S.shape[0], S.shape[1])
    assert_equal(S.shape[0], X.shape[0])
    assert_array_almost_equal(S, S2)

    # The manhattan metric should be equivalent to cityblock.
    S = pairwise_distances(X, Y, metric="manhattan")
    S2 = pairwise_distances(X, Y, metric=cityblock)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)

    # Low-level function for manhattan can divide in blocks to avoid
    # using too much memory during the broadcasting
    S3 = manhattan_distances(X, Y, size_threshold=10)
    assert_array_almost_equal(S, S3)

    # Test cosine as a string metric versus cosine callable
    # The string "cosine" uses sklearn.metric,
    # while the function cosine is scipy.spatial
    S = pairwise_distances(X, Y, metric="cosine")
    S2 = pairwise_distances(X, Y, metric=cosine)
    assert_equal(S.shape[0], X.shape[0])
    assert_equal(S.shape[1], Y.shape[0])
    assert_array_almost_equal(S, S2)

    # Test with sparse X and Y,
    # currently only supported for Euclidean, L1 and cosine.
    X_sparse = csr_matrix(X)
    Y_sparse = csr_matrix(Y)
    S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
    S2 = euclidean_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
    S2 = cosine_distances(X_sparse, Y_sparse)
    assert_array_almost_equal(S, S2)
    # mixed sparse formats must be accepted as well
    S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
    S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
    assert_array_almost_equal(S, S2)
    S2 = manhattan_distances(X, Y)
    assert_array_almost_equal(S, S2)

    # Test with scipy.spatial.distance metric, with a kwd
    kwds = {"p": 2.0}
    S = pairwise_distances(X, Y, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)

    # same with Y = None
    kwds = {"p": 2.0}
    S = pairwise_distances(X, metric="minkowski", **kwds)
    S2 = pairwise_distances(X, metric=minkowski, **kwds)
    assert_array_almost_equal(S, S2)

    # Test that scipy distance metrics throw an error if sparse matrix given
    assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
    assert_raises(TypeError, pairwise_distances, X, Y_sparse,
                  metric="minkowski")

    # Test that a value error is raised if the metric is unknown
    assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
    """Boolean metrics: rows that agree after boolean conversion have
    zero distance.
    """
    # test that we convert to boolean arrays for boolean distances
    rng = np.random.RandomState(0)
    X = rng.randn(5, 4)
    Y = X.copy()
    # perturb one entry; randn values are nonzero, so X and Y are still
    # identical once converted to boolean
    Y[0, 0] = 1 - Y[0, 0]

    for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
        for Z in [Y, None]:
            res = pairwise_distances(X, Z, metric=metric)
            # NaN entries are treated as zero distance -- presumably from
            # metrics undefined on all-equal rows; TODO confirm
            res[np.isnan(res)] = 0
            assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
    """metric='precomputed': shape validation, pass-through and dtype."""
    for func in [pairwise_distances, pairwise_kernels]:
        # Test correct shape
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), metric='precomputed')
        # with two args
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 4)),
                             metric='precomputed')
        # even if shape[1] agrees (although thus second arg is spurious)
        assert_raises_regexp(ValueError, '.* shape .*',
                             func, np.zeros((5, 3)), np.zeros((4, 3)),
                             metric='precomputed')

        # Test not copied (if appropriate dtype)
        S = np.zeros((5, 5))
        S2 = func(S, metric="precomputed")
        assert_true(S is S2)
        # with two args
        S = np.zeros((5, 3))
        S2 = func(S, np.zeros((3, 3)), metric="precomputed")
        assert_true(S is S2)

        # Test always returns float dtype
        S = func(np.array([[1]], dtype='int'), metric='precomputed')
        assert_equal('f', S.dtype.kind)

        # Test converts list to array-like
        S = func([[1.]], metric='precomputed')
        assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
    """Helper: n_jobs=2 must give the same result as n_jobs=1 for `metric`.

    Exercised for both dense arrays and CSR matrices, with and without Y.
    """
    rng = np.random.RandomState(0)
    for make_data in (np.array, csr_matrix):
        X = make_data(rng.random_sample((5, 4)))
        Y = make_data(rng.random_sample((3, 4)))
        try:
            S = func(X, metric=metric, n_jobs=1, **kwds)
        except (TypeError, ValueError) as exc:
            # Not all metrics support sparse input
            # ValueError may be triggered by bad callable
            if make_data is csr_matrix:
                # The parallel path must fail with the same exception type.
                assert_raises(type(exc), func, X, metric=metric,
                              n_jobs=2, **kwds)
                continue
            else:
                raise
        S2 = func(X, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
        S = func(X, Y, metric=metric, n_jobs=1, **kwds)
        S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
        assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
    """Nose-style generator test: run check_pairwise_parallel per metric."""
    wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
    metrics = [(pairwise_distances, 'euclidean', {}),
               (pairwise_distances, wminkowski, wminkowski_kwds),
               (pairwise_distances, 'wminkowski', wminkowski_kwds),
               (pairwise_kernels, 'polynomial', {'degree': 1}),
               (pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
               ]
    for func, metric, kwds in metrics:
        yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    """A callable metric with metric(x, x) != 0 must not be zeroed on the diagonal."""
    # paired_distances should allow callable metric where metric(x, x) != 0
    # Knowing that the callable is a strict metric would allow the diagonal to
    # be left uncalculated and set to 0.
    assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
    """Callable wrapper around pairwise.rbf_kernel.

    Promotes 1-D inputs to 2-D so single samples can be passed directly.
    """
    return rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
    """Check pairwise_kernels agrees with the direct kernel functions for
    dense, tuple, sparse, and callable inputs."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    # Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
    test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
                    "chi2", "additive_chi2"]
    for metric in test_metrics:
        function = PAIRWISE_KERNEL_FUNCTIONS[metric]
        # Test with Y=None
        K1 = pairwise_kernels(X, metric=metric)
        K2 = function(X)
        assert_array_almost_equal(K1, K2)
        # Test with Y=Y
        K1 = pairwise_kernels(X, Y=Y, metric=metric)
        K2 = function(X, Y=Y)
        assert_array_almost_equal(K1, K2)
        # Test with tuples as X and Y
        X_tuples = tuple([tuple([v for v in row]) for row in X])
        Y_tuples = tuple([tuple([v for v in row]) for row in Y])
        K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
        assert_array_almost_equal(K1, K2)
        # Test with sparse X and Y
        X_sparse = csr_matrix(X)
        Y_sparse = csr_matrix(Y)
        if metric in ["chi2", "additive_chi2"]:
            # these don't support sparse matrices yet
            assert_raises(ValueError, pairwise_kernels,
                          X_sparse, Y=Y_sparse, metric=metric)
            continue
        K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
        assert_array_almost_equal(K1, K2)
    # Test with a callable function, with given keywords.
    metric = callable_rbf_kernel
    kwds = {'gamma': 0.1}
    K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=Y, **kwds)
    assert_array_almost_equal(K1, K2)
    # callable function, X=Y
    K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
    K2 = rbf_kernel(X, Y=X, **kwds)
    assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
    """filter_params=True must drop unknown kwargs; without it TypeError is raised."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((2, 4))
    K = rbf_kernel(X, Y, gamma=0.1)
    # 'blabla' is not a valid rbf_kernel parameter.
    params = {"gamma": 0.1, "blabla": ":)"}
    K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
    assert_array_almost_equal(K, K2)
    assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    """Check paired_distances against the per-metric paired functions."""
    # Test the paired_distances helper function.
    rng = np.random.RandomState(0)
    # Euclidean distance should be equivalent to calling the function.
    X = rng.random_sample((5, 4))
    # Euclidean distance, with Y != X.
    Y = rng.random_sample((5, 4))
    for metric, func in iteritems(PAIRED_DISTANCES):
        S = paired_distances(X, Y, metric=metric)
        S2 = func(X, Y)
        assert_array_almost_equal(S, S2)
        S3 = func(csr_matrix(X), csr_matrix(Y))
        assert_array_almost_equal(S, S3)
        if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check the pairwise_distances implementation
            # gives the same value
            distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
            # Paired distances are the diagonal of the full pairwise matrix.
            distances = np.diag(distances)
            assert_array_almost_equal(distances, S)
    # Check the callable implementation
    S = paired_distances(X, Y, metric='manhattan')
    S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
    assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y should not
    # differ
    Y = rng.random_sample((3, 4))
    assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
    """Check pairwise minimum-distance computation for several metrics.

    Covers dense and sparse inputs, scikit-learn and SciPy metrics (string
    and callable), and compares the chunked implementation with a naive one.
    (A duplicated `assert_array_almost_equal(D, [0, 1])` from the original
    euclidean section has been removed — it asserted the same thing twice.)
    """
    X = [[0], [1]]
    Y = [[-1], [2]]
    Xsp = dok_matrix(X)
    Ysp = csr_matrix(Y, dtype=np.float32)
    # euclidean metric: X[0] is closest to Y[0], X[1] to Y[1], both at dist 1.
    D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
    D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # sparse matrix case
    Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
    assert_array_equal(Dsp, D)
    assert_array_equal(Esp, E)
    # We don't want np.matrix here
    assert_equal(type(Dsp), np.ndarray)
    assert_equal(type(Esp), np.ndarray)
    # Non-euclidean scikit-learn metric
    D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
    D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(D2, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
    D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (callable)
    D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Non-euclidean Scipy distance (string)
    D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
                                         metric_kwargs={"p": 2})
    assert_array_almost_equal(D, [0, 1])
    assert_array_almost_equal(E, [1., 1.])
    # Compare with naive implementation
    rng = np.random.RandomState(0)
    X = rng.randn(97, 149)
    Y = rng.randn(111, 149)
    dist = pairwise_distances(X, Y, metric="manhattan")
    dist_orig_ind = dist.argmin(axis=0)
    dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
    dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
        X, Y, axis=0, metric="manhattan", batch_size=50)
    np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
    np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
    """Check euclidean_distances for dense/sparse input and the
    {X,Y}_norm_squared precomputation shortcuts."""
    # Check the pairwise Euclidean distances computation
    X = [[0]]
    Y = [[1], [2]]
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    X = csr_matrix(X)
    Y = csr_matrix(Y)
    D = euclidean_distances(X, Y)
    assert_array_almost_equal(D, [[1., 2.]])
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((20, 4))
    X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
    Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
    # check that we still get the right answers with {X,Y}_norm_squared
    D1 = euclidean_distances(X, Y)
    D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
    D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
    D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
                             Y_norm_squared=Y_norm_sq)
    assert_array_almost_equal(D2, D1)
    assert_array_almost_equal(D3, D1)
    assert_array_almost_equal(D4, D1)
    # check we get the wrong answer with wrong {X,Y}_norm_squared
    X_norm_sq *= 0.5
    Y_norm_sq *= 0.5
    wrong_D = euclidean_distances(X, Y,
                                  X_norm_squared=np.zeros_like(X_norm_sq),
                                  Y_norm_squared=np.zeros_like(Y_norm_sq))
    assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
    """Row-wise Euclidean distance between corresponding rows of X and Y."""
    # Check the paired Euclidean distances computation
    X = [[0], [0]]
    Y = [[1], [2]]
    D = paired_euclidean_distances(X, Y)
    assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
    """Row-wise Manhattan distance between corresponding rows of X and Y."""
    # Check the paired manhattan distances computation
    X = [[0], [0]]
    Y = [[1], [2]]
    D = paired_manhattan_distances(X, Y)
    assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
    """Check additive_chi2_kernel / chi2_kernel values, dtypes and errors.

    Uses the builtin ``float`` in dtype assertions instead of the deprecated
    ``np.float`` alias (removed in NumPy 1.24); the two are identical objects,
    so the assertions are unchanged in meaning.
    """
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((10, 4))
    K_add = additive_chi2_kernel(X, Y)
    gamma = 0.1
    K = chi2_kernel(X, Y, gamma=gamma)
    assert_equal(K.dtype, float)
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            # Reference formula: k_add(x, y) = -sum((x-y)^2 / (x+y)),
            # k(x, y) = exp(gamma * k_add(x, y)).
            chi2 = -np.sum((x - y) ** 2 / (x + y))
            chi2_exp = np.exp(gamma * chi2)
            assert_almost_equal(K_add[i, j], chi2)
            assert_almost_equal(K[i, j], chi2_exp)
    # check diagonal is ones for data with itself
    K = chi2_kernel(Y)
    assert_array_equal(np.diag(K), 1)
    # check off-diagonal is < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
    # check that float32 is preserved
    X = rng.random_sample((5, 4)).astype(np.float32)
    Y = rng.random_sample((10, 4)).astype(np.float32)
    K = chi2_kernel(X, Y)
    assert_equal(K.dtype, np.float32)
    # check integer type gets converted,
    # check that zeros are handled
    X = rng.random_sample((10, 4)).astype(np.int32)
    K = chi2_kernel(X, X)
    assert_true(np.isfinite(K).all())
    assert_equal(K.dtype, float)
    # check that kernel of similar things is greater than dissimilar ones
    X = [[.3, .7], [1., 0]]
    Y = [[0, 1], [.9, .1]]
    K = chi2_kernel(X, Y)
    assert_greater(K[0, 0], K[0, 1])
    assert_greater(K[1, 1], K[1, 0])
    # test negative input
    assert_raises(ValueError, chi2_kernel, [[0, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
    # different n_features in X and Y
    assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
    # sparse matrices
    assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
    assert_raises(ValueError, additive_chi2_kernel,
                  csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
    """K(X, X) must be symmetric for every valid kernel."""
    # Valid kernels should be symmetric
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                   laplacian_kernel, sigmoid_kernel, cosine_similarity):
        K = kernel(X, X)
        assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
    """Kernels must give identical results for dense and CSR inputs."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    X_sparse = csr_matrix(X)
    for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
                   laplacian_kernel, sigmoid_kernel, cosine_similarity):
        K = kernel(X, X)
        K2 = kernel(X_sparse, X_sparse)
        assert_array_almost_equal(K, K2)
def test_linear_kernel():
    """Diagonal of the linear kernel equals the squared norms of the rows."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = linear_kernel(X, X)
    # the diagonal elements of a linear kernel are their squared norm
    # (K.flat[::6] walks the diagonal of the 5x5 Gram matrix)
    assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
    """Diagonal of the RBF kernel K(X, X) must be all ones."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = rbf_kernel(X, X)
    # the diagonal elements of a rbf kernel are 1
    # (K.flat[::6] walks the diagonal of the 5x5 Gram matrix)
    assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
    """Laplacian kernel: unit diagonal, off-diagonal strictly in (0, 1)."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    K = laplacian_kernel(X, X)
    # the diagonal elements of a laplacian kernel are 1
    assert_array_almost_equal(np.diag(K), np.ones(5))
    # off-diagonal elements are < 1 but > 0:
    assert_true(np.all(K > 0))
    assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
    """dense_output=False must return a sparse matrix matching the dense result."""
    # Test if cosine_similarity correctly produces sparse output.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
    assert_true(issparse(K1))
    K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
    assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
    """Cosine kernel equals the linear kernel on L2-normalized data."""
    # Test the cosine_similarity.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    Y = rng.random_sample((3, 4))
    Xcsr = csr_matrix(X)
    Ycsr = csr_matrix(Y)
    for X_, Y_ in ((X, None), (X, Y),
                   (Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine is kernel is equal to a linear kernel when data
        # has been previously normalized by L2-norm.
        K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
        X_ = normalize(X_)
        if Y_ is not None:
            Y_ = normalize(Y_)
        K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
        assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
    """With XB=None, check_pairwise_arrays returns XA as both outputs."""
    # Ensure that pairwise array check works for dense matrices.
    # Check that if XB is None, XB is returned as reference to XA
    XA = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_true(XA_checked is XB_checked)
    assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
    """A valid XB passes through check_pairwise_arrays/check_paired_arrays unchanged."""
    # Ensure that if XA and XB are given correctly, they return as equal.
    # Check that if XB is not None, it is returned equal.
    # Note that the second dimension of XB is the same as XA.
    XA = np.resize(np.arange(40), (5, 8))
    XB = np.resize(np.arange(32), (4, 8))
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
    # check_paired_arrays additionally requires the same number of rows.
    XB = np.resize(np.arange(40), (5, 8))
    XA_checked, XB_checked = check_paired_arrays(XA, XB)
    assert_array_equal(XA, XA_checked)
    assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
    """Mismatched feature/sample counts must raise ValueError."""
    # Ensure an error is raised if the dimensions are different.
    XA = np.resize(np.arange(45), (5, 9))
    XB = np.resize(np.arange(32), (4, 8))
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
    # Same n_features but different n_samples is invalid for paired arrays.
    XB = np.resize(np.arange(4 * 9), (4, 9))
    assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
    """Arrays with incompatible second dimensions must raise ValueError.

    The original body repeated the identical XA/XB setup and assertion twice,
    byte for byte; the dead duplicate has been removed.
    """
    # Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyways
    XA = np.arange(45).reshape(9, 5)
    XB = np.arange(32).reshape(4, 8)
    assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
    """check_pairwise_arrays must return valid, value-preserving sparse matrices."""
    # Ensures that checks return valid sparse matrices.
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_sparse = csr_matrix(XA)
    XB = rng.random_sample((5, 4))
    XB_sparse = csr_matrix(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
    # compare their difference because testing csr matrices for
    # equality with '==' does not work as expected.
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XB_checked))
    assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
    # Passing the same sparse matrix twice must also survive the check.
    XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
    assert_true(issparse(XA_checked))
    assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
    assert_true(issparse(XA_2_checked))
    assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert an n-dimensional array into nested tuples.

    A 1-D array becomes a flat tuple of its elements; higher-rank arrays
    become tuples of tuplified sub-arrays.
    """
    if X.ndim > 1:
        # Recurse over the leading axis, tuplifying each sub-array.
        return tuple(tuplify(sub) for sub in X)
    # Base case: a single dimension becomes a flat tuple of elements.
    return tuple(X)
def test_check_tuple_input():
    """Nested-tuple inputs must be accepted and converted without value change."""
    # Ensures that checks return valid tuples.
    rng = np.random.RandomState(0)
    XA = rng.random_sample((5, 4))
    XA_tuples = tuplify(XA)
    XB = rng.random_sample((5, 4))
    XB_tuples = tuplify(XB)
    XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
    assert_array_equal(XA_tuples, XA_checked)
    assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
    """float32 inputs stay float32; mixed precision upcasts both to float64.

    Uses the builtin ``float`` instead of the deprecated ``np.float`` alias
    (removed in NumPy 1.24); they are the same object, so the assertions are
    unchanged in meaning.
    """
    # Ensures that type float32 is preserved.
    XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
    XA_checked, XB_checked = check_pairwise_arrays(XA, None)
    assert_equal(XA_checked.dtype, np.float32)
    # both float32
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
    assert_equal(XA_checked.dtype, np.float32)
    assert_equal(XB_checked.dtype, np.float32)
    # mismatched A
    XA_checked, XB_checked = check_pairwise_arrays(XA.astype(float), XB)
    assert_equal(XA_checked.dtype, float)
    assert_equal(XB_checked.dtype, float)
    # mismatched B
    XA_checked, XB_checked = check_pairwise_arrays(XA, XB.astype(float))
    assert_equal(XA_checked.dtype, float)
    assert_equal(XB_checked.dtype, float)
| bsd-3-clause |
murali-munna/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
# Targets are 2-D: (pi*sin(x), pi*cos(x)) traces a circle of radius pi.
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
# Add uniform noise in [-0.5, 0.5) to every fifth sample.
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
mattbellis/hmis | tests/test_general.py | 1 | 2643 | import hmis
import unittest
import datetime as datetime
import pandas as pd
import sys
# Shared fixture: every test in this module reads from this pre-built
# HMIS dictionary loaded once at import time.
filename = 'test_data/hmis_test_data.pkl'
master_dictionary = hmis.read_dictionary_file(filename)
def test_calc_age():
    """calc_age returns the timedelta between a birthdate and a reference date."""
    ex_birthdate = hmis.get_date_from_string('1995-05-30')
    now = hmis.get_date_from_string('2017-08-22')
    ex_age = hmis.calc_age(ex_birthdate,now)
    # 1995-05-30 to 2017-08-22 is 8120 days.
    assert ex_age.days == 8120
    assert isinstance(ex_age, datetime.timedelta)
def test_get_date_from_string():
    """get_date_from_string parses an ISO date string into a datetime."""
    ex_date = '1995-05-30'
    ex_date = hmis.get_date_from_string(ex_date)
    assert ex_date.year == 1995
    assert isinstance(ex_date, datetime.datetime)
def test_convert_to_coordinates():
    """convert_to_coordinates maps a ZIP code to (latitude, longitude) floats."""
    zip_code = 12211
    lat, long = hmis.convert_to_coordinates(zip_code)
    assert isinstance(lat, float)
    assert isinstance(long, float)
    # Expected coordinates for ZIP 12211 (Albany, NY area), with tolerance.
    assert abs(lat-42.701752) < 0.01
    assert abs(long-(-73.7576574)) < 0.001
def test_pretty_print():
    """pretty_print returns 1 on success for a single-person record."""
    #capturedOutput = StringIO.StringIO()
    #sys.stdout = capturedOutput
    return_val = hmis.pretty_print(master_dictionary[0])
    assert return_val == 1
    #sys.stdout = sys.__stdout__
    #print 'Captured', capturedOutput.getvalue()
    #ex_print = '================================/n 110378941/n 1968-05-04/n Transitional Housing In/Out: 5/7/2013 - 5/3/2014 (361 days)/t Zip code: 12202'
    #assert ex_print == capturedOutput.getvalue()
def test_calc_average_age_by_year():
    """calc_average_age_by_year returns per-year [mean, std] age lists.

    The expected values below are pinned to the fixture data in
    test_data/hmis_test_data.pkl; empty lists mean no entries for that year.
    """
    ages_list = hmis.calc_average_age_by_year(master_dictionary)
    age_earlier, age2013, age2014, age2015, age2016 = ages_list
    for a in ages_list:
        assert isinstance(a, list)
    # NOTE(review): each pair looks like [mean_age, std] — confirm against
    # the hmis.calc_average_age_by_year implementation.
    assert age_earlier== [90.13972602739726, 38.02465753424657]
    assert age2013 == []
    assert age2014 == []
    assert age2015 == [35.6, 16.586301369863012]
    assert age2016 == [71.88219178082191, 5.838356164383562]
    #sys.stdout = capturedOutput
    #hmis.calc_average_age(master_dictionary)
    #sys.stdout = sys.__stdout__
    #print 'Captured', capturedOutput.getvalue()
    #assert 'Average age for all years before 2013: 49 /n Average age for the year 2013: 23 /n Average age for the year 2014: 23 /n Average age for the year 2015: 26 /n Average age for the year 2016: 26' == capturedOutput.getvalue()
| mit |
pvlib/pvlib-python | pvlib/iotools/bsrn.py | 3 | 6686 | """Functions to read data from the Baseline Surface Radiation Network (BSRN).
.. codeauthor:: Adam R. Jensen<adam-r-j@hotmail.com>
"""
import pandas as pd
import gzip
# Fixed-width (start, end) column boundaries for one physical line of
# logical record LR0100; passed to pandas.read_fwf as `colspecs`.
COL_SPECS = [(0, 3), (4, 9), (10, 16), (16, 22), (22, 27), (27, 32), (32, 39),
             (39, 45), (45, 50), (50, 55), (55, 64), (64, 70), (70, 75)]
# Output column names after the two physical lines per record are merged;
# 'empty' marks placeholder columns that are dropped before returning.
BSRN_COLUMNS = ['day', 'minute',
                'ghi', 'ghi_std', 'ghi_min', 'ghi_max',
                'dni', 'dni_std', 'dni_min', 'dni_max',
                'empty', 'empty', 'empty', 'empty', 'empty',
                'dhi', 'dhi_std', 'dhi_min', 'dhi_max',
                'lwd', 'lwd_std', 'lwd_min', 'lwd_max',
                'temp_air', 'relative_humidity', 'pressure']
def read_bsrn(filename):
    """
    Read a BSRN station-to-archive file into a DataFrame.
    The BSRN (Baseline Surface Radiation Network) is a world wide network
    of high-quality solar radiation monitoring stations as described in [1]_.
    The function only parses the basic measurements (LR0100), which include
    global, diffuse, direct and downwelling long-wave radiation [2]_. Future
    updates may include parsing of additional data and meta-data.
    BSRN files are freely available and can be accessed via FTP [3]_. Required
    username and password are easily obtainable as described in the BSRN's
    Data Release Guidelines [4]_.
    Parameters
    ----------
    filename: str
        A relative or absolute file path.
    Returns
    -------
    data: DataFrame
        A DataFrame with the columns as described below. For more extensive
        description of the variables, consult [2]_.
    Notes
    -----
    The data DataFrame includes the following fields:
    =======================  ======  ==========================================
    Key                      Format  Description
    =======================  ======  ==========================================
    day                      int     Day of the month 1-31
    minute                   int     Minute of the day 0-1439
    ghi                      float   Mean global horizontal irradiance [W/m^2]
    ghi_std                  float   Std. global horizontal irradiance [W/m^2]
    ghi_min                  float   Min. global horizontal irradiance [W/m^2]
    ghi_max                  float   Max. global horizontal irradiance [W/m^2]
    dni                      float   Mean direct normal irradiance [W/m^2]
    dni_std                  float   Std. direct normal irradiance [W/m^2]
    dni_min                  float   Min. direct normal irradiance [W/m^2]
    dni_max                  float   Max. direct normal irradiance [W/m^2]
    dhi                      float   Mean diffuse horizontal irradiance [W/m^2]
    dhi_std                  float   Std. diffuse horizontal irradiance [W/m^2]
    dhi_min                  float   Min. diffuse horizontal irradiance [W/m^2]
    dhi_max                  float   Max. diffuse horizontal irradiance [W/m^2]
    lwd                      float   Mean. downward long-wave radiation [W/m^2]
    lwd_std                  float   Std. downward long-wave radiation [W/m^2]
    lwd_min                  float   Min. downward long-wave radiation [W/m^2]
    lwd_max                  float   Max. downward long-wave radiation [W/m^2]
    temp_air                 float   Air temperature [°C]
    relative_humidity        float   Relative humidity [%]
    pressure                 float   Atmospheric pressure [hPa]
    =======================  ======  ==========================================
    References
    ----------
    .. [1] `World Radiation Monitoring Center - Baseline Surface Radiation
        Network (BSRN)
        <https://bsrn.awi.de/>`_
    .. [2] `Update of the Technical Plan for BSRN Data Management, 2013,
       Global Climate Observing System (GCOS) GCOS-172.
       <https://bsrn.awi.de/fileadmin/user_upload/bsrn.awi.de/Publications/gcos-174.pdf>`_
    .. [3] `BSRN Data Retrieval via FTP
       <https://bsrn.awi.de/data/data-retrieval-via-ftp/>`_
    .. [4] `BSRN Data Release Guidelines
       <https://bsrn.awi.de/data/conditions-of-data-release/>`_
    """
    # Read file and store the starting line number for each logical record (LR)
    line_no_dict = {}
    if str(filename).endswith('.gz'):  # check if file is a gzipped (.gz) file
        open_func, mode = gzip.open, 'rt'
    else:
        open_func, mode = open, 'r'
    with open_func(filename, mode) as f:
        f.readline()  # first line should be *U0001, so read it and discard
        line_no_dict['0001'] = 0
        date_line = f.readline()  # second line contains the year and month
        start_date = pd.Timestamp(year=int(date_line[7:11]),
                                  month=int(date_line[3:6]), day=1,
                                  tz='UTC')  # BSRN timestamps are UTC
        # Lines starting with '*' mark the beginning of a logical record.
        for num, line in enumerate(f, start=2):
            if line.startswith('*'):  # Find start of all logical records
                line_no_dict[line[2:6]] = num  # key is 4 digit LR number
    # Determine start and end line of logical record LR0100 to be parsed
    start_row = line_no_dict['0100'] + 1  # Start line number
    # If LR0100 is the last logical record, then read rest of file
    if start_row-1 == max(line_no_dict.values()):
        end_row = num  # then parse rest of the file
    else:  # otherwise parse until the beginning of the next logical record
        end_row = min([i for i in line_no_dict.values() if i > start_row]) - 1
    nrows = end_row-start_row+1
    # Read file as a fixed width file (fwf)
    data = pd.read_fwf(filename, skiprows=start_row, nrows=nrows, header=None,
                       colspecs=COL_SPECS, na_values=[-999.0, -99.9],
                       compression='infer')
    # Create multi-index and unstack, resulting in one column for each variable
    # (each LR0100 record spans two physical lines; pair them by index // 2).
    data = data.set_index([data.index // 2, data.index % 2])
    data = data.unstack(level=1).swaplevel(i=0, j=1, axis='columns')
    # Sort columns to match original order and assign column names
    data = data.reindex(sorted(data.columns), axis='columns')
    data.columns = BSRN_COLUMNS
    # Drop empty columns
    data = data.drop('empty', axis='columns')
    # Change day and minute type to integer
    data['day'] = data['day'].astype('Int64')
    data['minute'] = data['minute'].astype('Int64')
    # Set datetime index
    # (day is 1-based, minute counts from midnight; both relative to the
    # month's first day parsed from the file header).
    data.index = (start_date
                  + pd.to_timedelta(data['day']-1, unit='d')
                  + pd.to_timedelta(data['minute'], unit='T'))
    return data
| bsd-3-clause |
rlzijdeman/nlgis2 | maps/bin/viewer.py | 4 | 2527 |
# coding: utf-8
# In[1]:
#!/usr/bin/python
import urllib2
import simplejson
import json
import sys
from shapely.geometry import shape, Polygon, MultiPolygon
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
# Example of polygon
co1 = {"type": "Polygon", "coordinates": [
    [(-102.05, 41.0),
     (-102.05, 37.0),
     (-109.05, 37.0),
     (-109.05, 41.0)]]}
varyear = None
varcode = None
# NOTE(review): sys.argv[1] raises IndexError when the script is run without
# arguments — presumably `len(sys.argv) > 1` was intended. Confirm.
if sys.argv[1]:
    varcode = sys.argv[1]
if len(sys.argv) > 2:
    varyear = sys.argv[2]
# In[5]:
# Default
# NOTE(review): these defaults unconditionally overwrite the command-line
# values parsed above — looks like leftover notebook/debug code; confirm.
debug = 0
varcode = 10426
varyear = 1997
varname = "Amsterdam"
apiurl = "http://node-128.dev.socialhistoryservices.org/api/maps"
def getmap(apiurl, code, year, cityname):
    """Fetch GeoJSON polygons for `year` and return the (x, y) coordinate
    lists of the feature matching `code` (Amsterdam code) or `cityname`.

    If `cityname` is given, the code match is disabled (amscode is blanked)
    and the lookup is done by name instead.
    """
    amscode = str(code)
    if cityname:
        amscode = ''
    jsondataurl = apiurl + "?year=" + str(year) + "&format=geojson"
    req = urllib2.Request(jsondataurl)
    opener = urllib2.build_opener()
    f = opener.open(req)
    datapolygons = simplejson.load(f)
    def coordinates(polygons, amscode, cityname):
        # Walk the GeoJSON 'features' list and return the coordinates of the
        # first feature whose name matches; NOTE(review): a code-only match
        # assigns `co` but falls through without returning it, and `dict`
        # shadows the builtin — confirm intended behavior.
        for key in polygons:
            if key == 'features':
                data = polygons[key]
                for key in data:
                    response = json.dumps(key)
                    dict = json.loads(response)
                    for key in dict:
                        if key == 'properties':
                            maincode = str(dict[key]['amsterdamcode'])
                            mainname = dict[key]['name']
                            if maincode == amscode:
                                co = dict['geometry']['coordinates']
                            if mainname.encode('utf-8') == cityname:
                                co = dict['geometry']['coordinates']
                                return co
    # NOTE(review): `coordinates` returns None when nothing matches, which
    # makes the subscript below raise TypeError — confirm inputs always match.
    coords = coordinates(datapolygons, amscode, cityname)
    x = [i for i,j in coords[0][0]]
    y = [j for i,j in coords[0][0]]
    return (x,y)
# Fetch the polygon outline and plot it to screen and to myplot.png.
colors = ['red', 'green', 'orange', 'brown', 'purple']
(x,y) = getmap(apiurl, varcode, varyear, varname)
fig = plt.figure()
ax = fig.gca()
ax.plot(x,y)
ax.axis('scaled')
fig.savefig('myplot.png')
plt.show()
#from pyproj import Proj
#pa = Proj("+proj=aea +lat_1=37.0 +lat_2=41.0 +lat_0=39.0 +lon_0=-106.55")
#lon, lat = zip(x[0],y[0])
# Build a GeoJSON-style polygon from the fetched coordinates
# (Python 2: zip returns a list here).
cop = {"type": "Polygon", "coordinates": [zip(x, y)]}
#x, y = pa(lon, lat)
debug = 1
if debug:
    print cop
#shape = shape(cop)
#print shape.type
#print shape.area
# In[ ]:
| gpl-3.0 |
KellyChan/python-examples | cpp/deeplearning/caffe/examples/web_demo/app.py | 41 | 7793 | import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil
import caffe
# Repository root (two levels above this file); model/label paths are
# resolved relative to it.
REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with no classification result."""
    return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
    """Fetch an image from the `imageurl` query parameter and classify it.

    NOTE(review): this fetches an arbitrary user-supplied URL from the
    server (potential SSRF vector) — confirm the deployment restricts it.
    """
    imageurl = flask.request.args.get('imageurl', '')
    try:
        string_buffer = StringIO.StringIO(
            urllib.urlopen(imageurl).read())
        image = caffe.io.load_image(string_buffer)
    except Exception as err:
        # For any exception we encounter in reading the image, we will just
        # not continue.
        logging.info('URL Image open error: %s', err)
        return flask.render_template(
            'index.html', has_result=True,
            result=(False, 'Cannot open image from URL.')
        )
    logging.info('Image: %s', imageurl)
    result = app.clf.classify_image(image)
    return flask.render_template(
        'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
    """Save an uploaded image to disk, classify it, and render the result."""
    try:
        # We will save the file to disk for possible data collection.
        imagefile = flask.request.files['imagefile']
        # Timestamp prefix + secure_filename keeps names unique and safe.
        filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
            werkzeug.secure_filename(imagefile.filename)
        filename = os.path.join(UPLOAD_FOLDER, filename_)
        imagefile.save(filename)
        logging.info('Saving to %s.', filename)
        image = exifutil.open_oriented_im(filename)
    except Exception as err:
        logging.info('Uploaded image open error: %s', err)
        return flask.render_template(
            'index.html', has_result=True,
            result=(False, 'Cannot open uploaded image.')
        )
    result = app.clf.classify_image(image)
    return flask.render_template(
        'index.html', has_result=True, result=result,
        imagesrc=embed_image_html(image)
    )
def embed_image_html(image):
    """Creates an image embedded in HTML base64 format."""
    # Scale back to 8-bit; assumes `image` holds floats in [0, 1] — TODO confirm.
    image_pil = Image.fromarray((255 * image).astype('uint8'))
    image_pil = image_pil.resize((256, 256))
    string_buf = StringIO.StringIO()
    image_pil.save(string_buf, format='png')
    # Python 2 base64 codec inserts newlines every 76 chars; strip them.
    data = string_buf.getvalue().encode('base64').replace('\n', '')
    return 'data:image/png;base64,' + data
def allowed_file(filename):
    """Return True when *filename* carries an allowed image extension."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_IMAGE_EXTENSIONS
class ImagenetClassifier(object):
    """Wraps a Caffe ImageNet classifier plus the WordNet-hierarchy
    ("bet") expected-information-gain re-ranking used by the demo."""
    # Class-body code below runs at import time: it builds the default
    # argument dict and fails fast if any required model file is missing.
    default_args = {
        'model_def_file': (
            '{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
        'pretrained_model_file': (
            '{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
        'mean_file': (
            '{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
        'class_labels_file': (
            '{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
        'bet_file': (
            '{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
    }
    for key, val in default_args.iteritems():
        if not os.path.exists(val):
            raise Exception(
                "File for {} is missing. Should be at: {}".format(key, val))
    default_args['image_dim'] = 256
    default_args['raw_scale'] = 255.

    def __init__(self, model_def_file, pretrained_model_file, mean_file,
                 raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
        """Load the Caffe net, the class labels, and the bet hierarchy."""
        logging.info('Loading net and associated files...')
        if gpu_mode:
            caffe.set_mode_gpu()
        else:
            caffe.set_mode_cpu()
        self.net = caffe.Classifier(
            model_def_file, pretrained_model_file,
            image_dims=(image_dim, image_dim), raw_scale=raw_scale,
            mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
        )
        # Parse "synset_id name1, name2, ..." lines; keep the first name only.
        with open(class_labels_file) as f:
            labels_df = pd.DataFrame([
                {
                    'synset_id': l.strip().split(' ')[0],
                    'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
                }
                for l in f.readlines()
            ])
        self.labels = labels_df.sort('synset_id')['name'].values
        self.bet = cPickle.load(open(bet_file))
        # A bias to prefer children nodes in single-chain paths
        # I am setting the value to 0.1 as a quick, simple model.
        # We could use better psychological models here...
        self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1

    def classify_image(self, image):
        """Classify `image`; return (True, top-5, bet top-5, seconds) on
        success or (False, message) on failure."""
        try:
            starttime = time.time()
            scores = self.net.predict([image], oversample=True).flatten()
            endtime = time.time()
            indices = (-scores).argsort()[:5]
            predictions = self.labels[indices]
            # In addition to the prediction text, we will also produce
            # the length for the progress bar visualization.
            meta = [
                (p, '%.5f' % scores[i])
                for i, p in zip(indices, predictions)
            ]
            logging.info('result: %s', str(meta))
            # Compute expected information gain
            expected_infogain = np.dot(
                self.bet['probmat'], scores[self.bet['idmapping']])
            expected_infogain *= self.bet['infogain']
            # sort the scores
            infogain_sort = expected_infogain.argsort()[::-1]
            bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
                          for v in infogain_sort[:5]]
            logging.info('bet result: %s', str(bet_result))
            return (True, meta, bet_result, '%.3f' % (endtime - starttime))
        except Exception as err:
            logging.info('Classification error: %s', err)
            return (False, 'Something went wrong when classifying the '
                           'image. Maybe try another one?')
def start_tornado(app, port=5000):
    """Serve the WSGI *app* via Tornado's HTTP server on *port*.

    Blocking call: the IOLoop runs until it is stopped externally.
    """
    http_server = tornado.httpserver.HTTPServer(
        tornado.wsgi.WSGIContainer(app))
    http_server.listen(port)
    print("Tornado server starting on port {}".format(port))
    tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
    """
    Parse command line options and start the server.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '-d', '--debug',
        help="enable debug mode",
        action="store_true", default=False)
    parser.add_option(
        '-p', '--port',
        help="which port to serve content on",
        type='int', default=5000)
    parser.add_option(
        '-g', '--gpu',
        help="use gpu mode",
        action='store_true', default=False)
    opts, args = parser.parse_args()
    # Propagate the GPU flag into the classifier's constructor kwargs.
    ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
    # Initialize classifier + warm start by forward for allocation
    app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
    app.clf.net.forward()
    if opts.debug:
        # Flask's built-in dev server with auto-reload.
        app.run(debug=True, host='0.0.0.0', port=opts.port)
    else:
        # Production path: Tornado (blocking).
        start_tornado(app, opts.port)
if __name__ == '__main__':
    # Show INFO-level log messages from all loggers.
    logging.getLogger().setLevel(logging.INFO)
    # Ensure the upload destination exists before serving requests.
    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
    start_from_terminal(app)
| mit |
wateraccounting/wa | General/data_conversions.py | 1 | 16172 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 18 13:07:32 2016
@author: tih
"""
import gzip
import zipfile
import gdal
import osr
import os
import pandas as pd
import numpy as np
import netCDF4
import time
def Convert_nc_to_tiff(input_nc, output_folder):
    """
    This function converts the nc file into tiff files

    Keyword Arguments:
    input_nc -- name, name of the adf file
    output_folder -- Name of the output tiff file
    """
    from datetime import date
    import wa.General.raster_conversions as RC
    #All_Data = RC.Open_nc_array(input_nc)
    # A single path opens one dataset; a list of paths opens an
    # aggregated multi-file dataset.
    if type(input_nc) == str:
        nc = netCDF4.Dataset(input_nc)
    elif type(input_nc) == list:
        nc = netCDF4.MFDataset(input_nc)
    # NOTE(review): Python-2 idiom — under Python 3 ``.keys()`` is a view
    # and does not support ``[-1]``. Assumes the data variable is the
    # last one declared in the file — TODO confirm.
    Var = nc.variables.keys()[-1]
    All_Data = nc[Var]
    geo_out, epsg, size_X, size_Y, size_Z, Time = RC.Open_nc_info(input_nc)
    if epsg == 4326:
        epsg = 'WGS84'
    # Create output folder if needed
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)
    # One GeoTIFF per time slice; Time == -9999 marks a time-less file.
    for i in range(0,size_Z):
        if not Time == -9999:
            time_one = Time[i]
            d = date.fromordinal(time_one)
            name = os.path.splitext(os.path.basename(input_nc))[0]
            # Drop the trailing two underscore-separated name parts and
            # append the slice date as YYYY.MM.DD.
            nameparts = name.split('_')[0:-2]
            name_out = os.path.join(output_folder, '_'.join(nameparts) + '_%d.%02d.%02d.tif' %(d.year, d.month, d.day))
            Data_one = All_Data[i,:,:]
        else:
            name=os.path.splitext(os.path.basename(input_nc))[0]
            name_out = os.path.join(output_folder, name + '.tif')
            Data_one = All_Data[:,:]
        Save_as_tiff(name_out, Data_one, geo_out, epsg)
    return()
def Convert_grb2_to_nc(input_wgrib, output_nc, band):
    """Convert one band of a GRIB2 file to netCDF using gdal_translate.

    Keyword Arguments:
    input_wgrib -- path of the input GRIB2 file
    output_nc -- path of the netCDF file to create
    band -- integer, 1-based band number to extract

    Requires the WA_PATHS environment variable (';'-separated); its first
    entry must be the GDAL binary directory. Windows-specific: invokes
    ``gdal_translate.exe`` through a shell command.
    """
    import wa.General.raster_conversions as RC
    # Get environmental variable
    WA_env_paths = os.environ["WA_PATHS"].split(';')
    GDAL_env_path = WA_env_paths[0]
    GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe')
    # Create command
    fullCmd = ' '.join(['"%s" -of netcdf -b %d' %(GDAL_TRANSLATE_PATH, band), input_wgrib, output_nc]) # -r {nearest}
    RC.Run_command_window(fullCmd)
    return()
def Convert_adf_to_tiff(input_adf, output_tiff):
    """
    This function converts the adf files into tiff files

    Keyword Arguments:
    input_adf -- name, name of the adf file
    output_tiff -- Name of the output tiff file

    Returns the path of the written GeoTIFF. Windows-specific: shells out
    to ``gdal_translate.exe`` found via the WA_PATHS environment variable.
    """
    import wa.General.raster_conversions as RC
    # Get environmental variable
    WA_env_paths = os.environ["WA_PATHS"].split(';')
    GDAL_env_path = WA_env_paths[0]
    GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe')
    # convert data from ESRI GRID to GeoTIFF
    fullCmd = ('"%s" -co COMPRESS=DEFLATE -co PREDICTOR=1 -co '
               'ZLEVEL=1 -of GTiff %s %s') % (GDAL_TRANSLATE_PATH, input_adf, output_tiff)
    RC.Run_command_window(fullCmd)
    return(output_tiff)
def Extract_Data(input_file, output_folder):
    """
    This function extract the zip files

    Keyword Arguments:
    input_file -- name, name of the file that must be unzipped
    output_folder -- Dir, directory where the unzipped data must be
                     stored
    """
    # Use a context manager so the archive handle is closed even when
    # extractall() raises (the original leaked the handle on error).
    with zipfile.ZipFile(input_file, 'r') as z:
        z.extractall(output_folder)
def Extract_Data_gz(zip_filename, outfilename):
    """
    This function extract the gzip files

    Keyword Arguments:
    zip_filename -- name, name of the file that must be unzipped
    outfilename -- Dir, path where the unzipped data must be stored

    The compressed source file is deleted after successful extraction.
    """
    # Read the whole compressed payload; the context manager closes the
    # gzip handle (the original also called zf.close() redundantly).
    with gzip.GzipFile(zip_filename, 'rb') as zf:
        file_content = zf.read()
    # BUGFIX: the original used the Python-2-only ``file(...)`` builtin
    # and leaked the handle on write errors; ``with open(...)`` is both
    # Python-3 compatible and exception safe.
    with open(outfilename, 'wb') as save_file_content:
        save_file_content.write(file_content)
    os.remove(zip_filename)
def Save_as_tiff(name='', data='', geo='', projection=''):
    """
    This function save the array as a geotiff

    Keyword arguments:
    name -- string, directory name
    data -- [array], dataset of the geotiff
    geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,
            pixelsize], (geospatial dataset)
    projection -- integer, the EPSG code
    """
    # save as a geotiff (single band, float32, LZW-compressed,
    # nodata = -9999)
    driver = gdal.GetDriverByName("GTiff")
    dst_ds = driver.Create(name, int(data.shape[1]), int(data.shape[0]), 1,
                           gdal.GDT_Float32, ['COMPRESS=LZW'])
    srse = osr.SpatialReference()
    if projection == '':
        # No projection given: default to geographic WGS84.
        srse.SetWellKnownGeogCS("WGS84")
    else:
        # Try, in order: well-known name, EPSG integer code, WKT string.
        # NOTE(review): the comparison against 6 presumably tests for
        # OGR's failure return code — confirm against the osr docs.
        try:
            if not srse.SetWellKnownGeogCS(projection) == 6:
                srse.SetWellKnownGeogCS(projection)
            else:
                try:
                    srse.ImportFromEPSG(int(projection))
                except:
                    srse.ImportFromWkt(projection)
        except:
            try:
                srse.ImportFromEPSG(int(projection))
            except:
                srse.ImportFromWkt(projection)
    dst_ds.SetProjection(srse.ExportToWkt())
    dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
    dst_ds.SetGeoTransform(geo)
    dst_ds.GetRasterBand(1).WriteArray(data)
    # Dropping the reference flushes and closes the GDAL dataset.
    dst_ds = None
    return()
def Save_as_MEM(data='', geo='', projection=''):
    """
    This function save the array as a memory file

    Keyword arguments:
    data -- [array], dataset of the geotiff
    geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation,
            pixelsize], (geospatial dataset)
    projection -- interger, the EPSG code

    Returns the in-memory GDAL dataset (caller must keep a reference
    alive; dropping it destroys the data).
    """
    # save as a geotiff (in-memory driver; single band, float32,
    # nodata = -9999)
    driver = gdal.GetDriverByName("MEM")
    dst_ds = driver.Create('', int(data.shape[1]), int(data.shape[0]), 1,
                           gdal.GDT_Float32, ['COMPRESS=LZW'])
    srse = osr.SpatialReference()
    if projection == '':
        srse.SetWellKnownGeogCS("WGS84")
    else:
        # Unlike Save_as_tiff, only well-known names are accepted here.
        srse.SetWellKnownGeogCS(projection)
    dst_ds.SetProjection(srse.ExportToWkt())
    dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
    dst_ds.SetGeoTransform(geo)
    dst_ds.GetRasterBand(1).WriteArray(data)
    return(dst_ds)
def Save_as_NC(namenc, DataCube, Var, Reference_filename, Startdate = '', Enddate = '', Time_steps = '', Scaling_factor = 1):
    """
    This function save the array as a netcdf file

    Keyword arguments:
    namenc -- string, complete path of the output file with .nc extension
    DataCube -- [array], dataset of the nc file, can be a 2D or 3D array [time, lat, lon], must be same size as reference data
    Var -- string, the name of the variable
    Reference_filename -- string, complete path to the reference file name
    Startdate -- 'YYYY-mm-dd', needs to be filled when you want to save a 3D array, defines the Start datum of the dataset
    Enddate -- 'YYYY-mm-dd', needs to be filled when you want to save a 3D array, defines the End datum of the dataset
    Time_steps -- 'monthly' or 'daily', needs to be filled when you want to save a 3D array, defines the timestep of the dataset
    Scaling_factor -- number, scaling_factor of the dataset, default = 1

    Does nothing when the output file already exists.
    """
    # Import modules
    import wa.General.raster_conversions as RC
    from netCDF4 import Dataset

    # Never overwrite an existing file.
    if not os.path.exists(namenc):

        # Get raster information
        geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_filename)

        # Create the lat/lon rasters (cell-centre coordinates)
        lon = np.arange(size_X)*geo_out[1]+geo_out[0] - 0.5 * geo_out[1]
        lat = np.arange(size_Y)*geo_out[5]+geo_out[3] - 0.5 * geo_out[5]

        # Create the nc file
        nco = Dataset(namenc, 'w', format='NETCDF4_CLASSIC')
        nco.description = '%s data' %Var

        # Create dimensions, variables and attributes:
        nco.createDimension('longitude', size_X)
        nco.createDimension('latitude', size_Y)

        # Create time dimension if the parameter is time dependent.
        # BUGFIX: the original used "Startdate is not ''" — identity
        # comparison with a literal is implementation-dependent (and a
        # SyntaxWarning on modern Python); use equality instead.
        if Startdate != '':
            # NOTE(review): Dates stays undefined when Time_steps is
            # neither 'monthly' nor 'daily' — callers must pass one of
            # the two documented values.
            if Time_steps == 'monthly':
                Dates = pd.date_range(Startdate,Enddate,freq = 'MS')
            if Time_steps == 'daily':
                Dates = pd.date_range(Startdate,Enddate,freq = 'D')
            time_or=np.zeros(len(Dates))
            i = 0
            for Date in Dates:
                time_or[i] = Date.toordinal()
                i += 1
            nco.createDimension('time', None)
            timeo = nco.createVariable('time', 'f4', ('time',))
            timeo.units = '%s' %Time_steps
            timeo.standard_name = 'time'

        # Create the lon variable
        lono = nco.createVariable('longitude', 'f8', ('longitude',))
        lono.standard_name = 'longitude'
        lono.units = 'degrees_east'
        lono.pixel_size = geo_out[1]

        # Create the lat variable
        lato = nco.createVariable('latitude', 'f8', ('latitude',))
        lato.standard_name = 'latitude'
        lato.units = 'degrees_north'
        lato.pixel_size = geo_out[5]

        # Create container variable for CRS: lon/lat WGS84 datum
        crso = nco.createVariable('crs', 'i4')
        crso.long_name = 'Lon/Lat Coords in WGS84'
        crso.grid_mapping_name = 'latitude_longitude'
        crso.projection = proj
        crso.longitude_of_prime_meridian = 0.0
        crso.semi_major_axis = 6378137.0
        crso.inverse_flattening = 298.257223563
        crso.geo_reference = geo_out

        # Create the data variable (3D with time axis, or 2D without)
        if Startdate != '':
            preco = nco.createVariable('%s' %Var, 'f8', ('time', 'latitude', 'longitude'), zlib=True, least_significant_digit=1)
            timeo[:]=time_or
        else:
            preco = nco.createVariable('%s' %Var, 'f8', ('latitude', 'longitude'), zlib=True, least_significant_digit=1)

        # Set the data variable information
        preco.scale_factor = Scaling_factor
        preco.add_offset = 0.00
        preco.grid_mapping = 'crs'
        # Values are pre-scaled by hand below, so disable automatic
        # scaling on write.
        preco.set_auto_maskandscale(False)

        # Set the lat/lon variable
        lono[:] = lon
        lato[:] = lat

        # Set the data variable, dividing out the scale factor that
        # readers will re-apply. (np.float was a deprecated alias of
        # the builtin float and has been removed from NumPy.)
        if Startdate != '':
            for i in range(len(Dates)):
                preco[i,:,:] = DataCube[i,:,:]*1./float(Scaling_factor)
        else:
            preco[:,:] = DataCube[:,:] * 1./float(Scaling_factor)

        nco.close()
    return()
def Create_NC_name(Var, Simulation, Dir_Basin, sheet_nmbr, info = ''):
    """Build the NetCDF output path for a simulation sheet and make sure
    its folder exists on disk.

    Returns ``<Dir_Basin>/Simulations/Simulation_<N>/Sheet_<sheet_nmbr>/
    <Var>_Simulation<N>_<info joined by '_'>.nc``.
    """
    # File name: variable, simulation number and the joined info parts.
    file_name = '_'.join([Var, 'Simulation%d' % Simulation, '_'.join(info)]) + '.nc'
    # Destination folder, created on demand.
    folder = os.path.join(Dir_Basin, 'Simulations',
                          'Simulation_%d' % Simulation,
                          'Sheet_%d' % sheet_nmbr)
    if not os.path.exists(folder):
        os.makedirs(folder)
    return os.path.join(folder, file_name)
def Create_new_NC_file(nc_outname, Basin_Example_File, Basin):
    """Create a yearly NetCDF container for *Basin* from an example raster.

    Keyword arguments:
    nc_outname -- output path; its basename must start with the year
                  (e.g. '2007.nc'), which defines the monthly time axis
    Basin_Example_File -- raster whose grid/projection define the file layout
    Basin -- string, basin name stored as the file description

    Writes lat/lon/time/crs variables plus a 'Landuse' band holding the
    example raster's values (NaN/negative cells become -9999).
    """
    # Open basin file
    dest = gdal.Open(Basin_Example_File)
    Basin_array = dest.GetRasterBand(1).ReadAsArray()
    Basin_array[np.isnan(Basin_array)] = -9999
    Basin_array[Basin_array<0] = -9999

    # Get Basic information
    Geo = dest.GetGeoTransform()
    size_X = dest.RasterXSize
    size_Y = dest.RasterYSize
    epsg = dest.GetProjection()

    # Get Year and months (12 month-start stamps for the file's year)
    year = int(os.path.basename(nc_outname).split(".")[0])
    Dates = pd.date_range("%d-01-01" %year, "%d-12-31" %year, freq = "MS")

    # Latitude and longitude
    # NOTE(review): the half-pixel offset is added here but subtracted in
    # Save_as_NC — confirm which convention (centre vs corner) is intended.
    lons = np.arange(size_X)*Geo[1]+Geo[0] + 0.5 * Geo[1]
    lats = np.arange(size_Y)*Geo[5]+Geo[3] + 0.5 * Geo[5]

    # Create NetCDF file
    nco = netCDF4.Dataset(nc_outname, 'w', format = 'NETCDF4_CLASSIC')
    nco.set_fill_on()
    nco.description = '%s' %Basin

    # Create dimensions
    nco.createDimension('latitude', size_Y)
    nco.createDimension('longitude', size_X)
    nco.createDimension('time', None)

    # Create NetCDF variables
    crso = nco.createVariable('crs', 'i4')
    crso.long_name = 'Lon/Lat Coords in WGS84'
    crso.standard_name = 'crs'
    crso.grid_mapping_name = 'latitude_longitude'
    crso.projection = epsg
    crso.longitude_of_prime_meridian = 0.0
    crso.semi_major_axis = 6378137.0
    crso.inverse_flattening = 298.257223563
    crso.geo_reference = Geo

    ######################### Save Rasters in NetCDF ##############################

    lato = nco.createVariable('latitude', 'f8', ('latitude',))
    lato.units = 'degrees_north'
    lato.standard_name = 'latitude'
    lato.pixel_size = Geo[5]

    lono = nco.createVariable('longitude', 'f8', ('longitude',))
    lono.units = 'degrees_east'
    lono.standard_name = 'longitude'
    lono.pixel_size = Geo[1]

    timeo = nco.createVariable('time', 'f4', ('time',))
    timeo.units = 'Monthly'
    timeo.standard_name = 'time'

    # Variables
    basin_var = nco.createVariable('Landuse', 'i',
                                   ('latitude', 'longitude'),
                                   fill_value=-9999)
    basin_var.long_name = 'Landuse'
    basin_var.grid_mapping = 'crs'

    # Create time unit (proleptic-ordinal day numbers, one per month)
    i = 0
    time_or=np.zeros(len(Dates))
    for Date in Dates:
        time_or[i] = Date.toordinal()
        i += 1

    # Load data
    lato[:] = lats
    lono[:] = lons
    timeo[:] = time_or
    basin_var[:,:] = Basin_array

    # close the file
    # NOTE(review): the sleep presumably works around slow filesystem
    # flushes before other code reopens the file — confirm.
    time.sleep(1)
    nco.close()
    return()
def Add_NC_Array_Variable(nc_outname, Array, name, unit, Scaling_factor = 1):
    """Append a time-dependent integer variable to an existing NetCDF file.

    Keyword arguments:
    nc_outname -- path of an existing NetCDF file (opened in 'r+' mode)
    Array -- 3D array [time, latitude, longitude]; NaNs become -9999
    name -- string, variable name to create
    unit -- string, stored as the variable's units attribute
    Scaling_factor -- number, values are stored as Array/Scaling_factor
                      and the factor is saved so readers can re-apply it
    """
    # create input array: mask NaNs and quantise by the scale factor.
    # (np.float was a deprecated alias of the builtin float and has been
    # removed from NumPy; the behaviour is unchanged.)
    Array[np.isnan(Array)] = -9999 * float(Scaling_factor)
    Array = np.int_(Array * 1./float(Scaling_factor))

    # Open the existing NetCDF file for appending
    nco = netCDF4.Dataset(nc_outname, 'r+', format = 'NETCDF4_CLASSIC')
    nco.set_fill_on()
    paro = nco.createVariable('%s' %name, 'i',
                              ('time', 'latitude', 'longitude'),fill_value=-9999,
                              zlib=True, least_significant_digit=0)
    paro.scale_factor = Scaling_factor
    paro.add_offset = 0.00
    paro.grid_mapping = 'crs'
    paro.long_name = name
    paro.units = unit
    # Values are pre-scaled above, so disable automatic scaling on write.
    paro.set_auto_maskandscale(False)

    # Set the data variable
    paro[:,:,:] = Array

    # close the file
    time.sleep(1)
    nco.close()
    return()
def Add_NC_Array_Static(nc_outname, Array, name, unit, Scaling_factor = 1):
    """Append a static (time-independent) integer variable to an existing
    NetCDF file.

    Keyword arguments:
    nc_outname -- path of an existing NetCDF file (opened in 'r+' mode)
    Array -- 2D array [latitude, longitude]; NaNs become -9999
    name -- string, variable name to create
    unit -- string, stored as the variable's units attribute
    Scaling_factor -- number, values are stored as Array/Scaling_factor
                      and the factor is saved so readers can re-apply it
    """
    # create input array: mask NaNs and quantise by the scale factor.
    # (np.float was a deprecated alias of the builtin float and has been
    # removed from NumPy; the behaviour is unchanged.)
    Array[np.isnan(Array)] = -9999 * float(Scaling_factor)
    Array = np.int_(Array * 1./float(Scaling_factor))

    # Open the existing NetCDF file for appending
    nco = netCDF4.Dataset(nc_outname, 'r+', format = 'NETCDF4_CLASSIC')
    nco.set_fill_on()
    paro = nco.createVariable('%s' %name, 'i',
                              ('latitude', 'longitude'),fill_value=-9999,
                              zlib=True, least_significant_digit=0)
    paro.scale_factor = Scaling_factor
    paro.add_offset = 0.00
    paro.grid_mapping = 'crs'
    paro.long_name = name
    paro.units = unit
    # Values are pre-scaled above, so disable automatic scaling on write.
    paro.set_auto_maskandscale(False)

    # Set the data variable
    paro[:,:] = Array

    # close the file
    time.sleep(1)
    nco.close()
    return()
def Convert_dict_to_array(River_dict, Array_dict, Reference_data):
    """Scatter per-river-segment time series back onto the raster grid.

    Keyword arguments:
    River_dict -- dict {segment_index: sequence of 1-based flat pixel IDs}
    Array_dict -- dict {segment_index: 2D array [time, pixel]} aligned
                  with River_dict's pixel order
    Reference_data -- path of a .nc or raster file that defines the grid

    Returns a 3D array [time, latitude, longitude] filled with NaN except
    at river pixels.
    """
    import numpy as np
    import os
    import wa.General.raster_conversions as RC

    if os.path.splitext(Reference_data)[-1] == '.nc':
        # Get raster information
        geo_out, proj, size_X, size_Y, size_Z, Time = RC.Open_nc_info(Reference_data)
    else:
        # Get raster information
        geo_out, proj, size_X, size_Y = RC.Open_array_info(Reference_data)

    # Create ID Matrix: row-major flat index of each cell, 1-based, so it
    # matches the pixel IDs stored in River_dict.
    y,x = np.indices((size_Y, size_X))
    ID_Matrix = np.int32(np.ravel_multi_index(np.vstack((y.ravel(),x.ravel())),(size_Y,size_X),mode='clip').reshape(x.shape)) + 1

    # Get tiff array time dimension:
    time_dimension = int(np.shape(Array_dict[0])[0])

    # create an empty array (NaN = not a river pixel)
    DataCube = np.ones([time_dimension, size_Y, size_X]) * np.nan

    for river_part in range(0,len(River_dict)):
        # NOTE(review): iteration starts at 1, so the first pixel of each
        # segment is skipped — presumably it duplicates the junction pixel
        # of the upstream segment; confirm against the routing code.
        for river_pixel in range(1,len(River_dict[river_part])):
            river_pixel_ID = River_dict[river_part][river_pixel]
            if len(np.argwhere(ID_Matrix == river_pixel_ID))>0:
                row, col = np.argwhere(ID_Matrix == river_pixel_ID)[0][:]
                DataCube[:,row,col] = Array_dict[river_part][:,river_pixel]

    return(DataCube)
| apache-2.0 |
BiaDarkia/scikit-learn | sklearn/model_selection/tests/test_split.py | 21 | 57285 | """Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from itertools import combinations
from itertools import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.utils.fixes import comb
from sklearn.svm import SVC
# Shared fixtures for the split tests.
X = np.ones(10)                    # 10 constant samples
y = np.arange(10) // 2             # 5 balanced classes: [0,0,1,1,2,2,3,3,4,4]
P_sparse = coo_matrix(np.eye(5))   # sparse fit_param used by MockClassifier
# Several equivalent group labelings (int/str arrays and plain lists) used
# to exercise the group-based cross-validators.
test_groups = (
    np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
    np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
    np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
    np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
    [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
    ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'])
digits = load_digits()             # real dataset for the dependency test
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: arbitrary hyper-parameter echoed back by score()
        # allow_nd: when True, fit/predict flatten >2-D inputs
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        # Each optional fit_param, when passed, must have been sliced
        # consistently with X by the cross-validation machinery.
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level `y`, not the Y
            # argument passed to fit.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # "Prediction" is just the first feature of each sample.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Deterministic score in (0, 1] derived from the hyper-parameter.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
@ignore_warnings
def test_cross_validator_with_default_params():
    """Smoke-test every CV splitter: n_splits, 1d/2d equivalence, integer
    indices and repr()."""
    n_samples = 4
    n_unique_groups = 4
    n_splits = 2
    p = 2
    n_shuffle_splits = 10  # (the default value)

    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    groups = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_splits)
    skf = StratifiedKFold(n_splits)
    lolo = LeaveOneGroupOut()
    lopo = LeavePGroupsOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = np of unique folds = 2

    loo_repr = "LeaveOneOut()"
    lpo_repr = "LeavePOut(p=2)"
    kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
    skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
    lolo_repr = "LeaveOneGroupOut()"
    lopo_repr = "LeavePGroupsOut(n_groups=2)"
    ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
               "test_size='default',\n       train_size=None)")
    ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"

    n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
                         n_unique_groups, comb(n_unique_groups, p),
                         n_shuffle_splits, 2]

    for i, (cv, cv_repr) in enumerate(zip(
            [loo, lpo, kf, skf, lolo, lopo, ss, ps],
            [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
             ss_repr, ps_repr])):
        # Test if get_n_splits works correctly
        assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))

        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, groups)),
                                list(cv.split(X_1d, y, groups)))
        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, groups):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            # BUGFIX: the original checked `train` twice and never
            # validated the dtype of the test indices.
            assert_equal(np.asarray(test).dtype.kind, 'i')

        # Test if the repr works without any errors
        assert_equal(cv_repr, repr(cv))

    # ValueError for get_n_splits methods
    msg = "The 'X' parameter should not be None."
    assert_raise_message(ValueError, msg,
                         loo.get_n_splits, None, y, groups)
    assert_raise_message(ValueError, msg,
                         lpo.get_n_splits, None, y, groups)
def test_2d_y():
    """Splitters must accept 1d, column-vector and multilabel y; multilabel
    is only rejected by the stratified splitters (with a clear message)."""
    # smoke test for 2d y and multi-label
    n_samples = 30
    rng = np.random.RandomState(1)
    X = rng.randint(0, 3, size=(n_samples, 2))
    y = rng.randint(0, 3, size=(n_samples,))
    y_2d = y.reshape(-1, 1)
    y_multilabel = rng.randint(0, 2, size=(n_samples, 3))
    groups = rng.randint(0, 3, size=(n_samples,))
    splitters = [LeaveOneOut(), LeavePOut(p=2), KFold(), StratifiedKFold(),
                 RepeatedKFold(), RepeatedStratifiedKFold(),
                 ShuffleSplit(), StratifiedShuffleSplit(test_size=.5),
                 GroupShuffleSplit(), LeaveOneGroupOut(),
                 LeavePGroupsOut(n_groups=2), GroupKFold(), TimeSeriesSplit(),
                 PredefinedSplit(test_fold=groups)]
    for splitter in splitters:
        list(splitter.split(X, y, groups))
        list(splitter.split(X, y_2d, groups))
        try:
            list(splitter.split(X, y_multilabel, groups))
        except ValueError as e:
            # Only the stratified splitters raise here; check the message.
            allowed_target_types = ('binary', 'multiclass')
            msg = "Supported target types are: {}. Got 'multilabel".format(
                allowed_target_types)
            assert msg in str(e)
def check_valid_split(train, test, n_samples=None):
    """Assert that *train* and *test* are disjoint and, when *n_samples*
    is given, that together they cover every index in range(n_samples)."""
    # Sets give more informative assertion failure messages than arrays.
    train_set = set(train)
    test_set = set(test)
    # No index may appear in both halves of the split.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Together the two halves must cover the full index range.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
    """Check that *cv* yields valid splits whose test folds jointly cover
    every sample exactly, and that the split count matches
    *expected_n_splits* (or get_n_splits when None)."""
    n_samples = _num_samples(X)
    # Check that a all the samples appear at least once in a test fold
    if expected_n_splits is not None:
        assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
    else:
        expected_n_splits = cv.get_n_splits(X, y, groups)

    collected_test_samples = set()
    iterations = 0
    for train, test in cv.split(X, y, groups):
        # Each individual split must be disjoint and complete.
        check_valid_split(train, test, n_samples=n_samples)
        iterations += 1
        collected_test_samples.update(test)

    # Check that the accumulated test samples cover the whole dataset
    assert_equal(iterations, expected_n_splits)
    if n_samples is not None:
        assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    """KFold/StratifiedKFold must reject invalid fold counts and warn or
    fail on data that cannot be stratified."""
    X1 = np.array([[1, 2], [3, 4], [5, 6]])
    X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # Check that errors are raised if there is not enough samples
    # BUGFIX: the original line built a bare tuple
    # `(ValueError, next, KFold(4).split(X1))` which asserted nothing;
    # actually run the assertion.
    assert_raises(ValueError, next, KFold(4).split(X1))

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = np.array([3, 3, -1, -1, 3])

    skf_3 = StratifiedKFold(3)
    assert_warns_message(Warning, "The least populated class",
                         next, skf_3.split(X2, y))

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)

    # Check that errors are raised if all n_groups for individual
    # classes are less than n_splits.
    y = np.array([3, 3, -1, -1, 2])

    assert_raises(ValueError, next, skf_3.split(X2, y))

    # Error when number of folds is <= 1
    assert_raises(ValueError, KFold, 0)
    assert_raises(ValueError, KFold, 1)
    error_string = ("k-fold cross-validation requires at least one"
                    " train/test split")
    assert_raise_message(ValueError, error_string,
                         StratifiedKFold, 0)
    assert_raise_message(ValueError, error_string,
                         StratifiedKFold, 1)

    # When n_splits is not integer:
    assert_raises(ValueError, KFold, 1.5)
    assert_raises(ValueError, KFold, 2.0)
    assert_raises(ValueError, StratifiedKFold, 1.5)
    assert_raises(ValueError, StratifiedKFold, 2.0)

    # When shuffle is not  a bool:
    assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
    """Every sample index must appear in exactly one KFold test fold."""
    # Check all indices are returned in the test folds
    X1 = np.ones(18)
    kf = KFold(3)
    check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)

    # Check all indices are returned in the test folds  even when equal-sized
    # folds are not possible
    X2 = np.ones(17)
    kf = KFold(3)
    check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)

    # Check if get_n_splits returns the number of folds
    assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
    """Without shuffling, KFold must slice contiguous index ranges."""
    # Manually check that KFold preserves the data ordering on toy datasets
    X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    # 4 samples, 2 folds -> [0,1] / [2,3]
    splits = KFold(2).split(X2[:-1])
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])

    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])

    # 5 samples, 2 folds -> the first fold gets the extra sample: [0,1,2] / [3,4]
    splits = KFold(2).split(X2)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])

    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    """Without shuffling, StratifiedKFold must keep the original ordering
    within each class and treat int and str labels identically."""
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    X, y = np.ones(4), [1, 1, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])

    # Check if get_n_splits returns the number of folds
    assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))

    # Make sure string labels are also supported
    X = np.ones(7)
    y1 = ['1', '1', '1', '0', '0', '0', '0']
    y2 = [1, 1, 1, 0, 0, 0, 0]
    np.testing.assert_equal(
        list(StratifiedKFold(2).split(X, y1)),
        list(StratifiedKFold(2).split(X, y2)))
def test_stratified_kfold_ratios():
    """Each train and test fold must reproduce the global class ratios
    (10% / 89% / 1%) to ~2 decimal places, with and without shuffling."""
    # Check that stratified kfold preserves class ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    X = np.ones(n_samples)
    y = np.array([4] * int(0.10 * n_samples) +
                 [0] * int(0.89 * n_samples) +
                 [1] * int(0.01 * n_samples))

    for shuffle in (False, True):
        for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
            assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
            assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
            assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
            assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    """Fold sizes may differ by at most one sample and must sum to n."""
    # Check that KFold returns folds with balanced sizes
    for i in range(11, 17):
        kf = KFold(5).split(X=np.ones(i))
        sizes = []
        for _, test in kf:
            sizes.append(len(test))

        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
    """StratifiedKFold fold sizes may differ by at most one sample and
    must sum to n, shuffled or not."""
    # Check that KFold returns folds with balanced sizes (only when
    # stratification is possible)
    # Repeat with shuffling turned off and on
    X = np.ones(17)
    y = [0] * 3 + [1] * 14

    for shuffle in (True, False):
        cv = StratifiedKFold(3, shuffle=shuffle)
        for i in range(11, 17):
            skf = cv.split(X[:i], y[:i])
            sizes = []
            for _, test in skf:
                sizes.append(len(test))

            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
    """Shuffled KFold with different seeds must produce genuinely
    different train sets while still covering every sample."""
    # Check the indices are shuffled properly
    kf = KFold(3)
    kf2 = KFold(3, shuffle=True, random_state=0)
    kf3 = KFold(3, shuffle=True, random_state=1)

    X = np.ones(300)

    all_folds = np.zeros(300)
    for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
            kf.split(X), kf2.split(X), kf3.split(X)):
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # Assert that there is no complete overlap
            assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))

        # Set all test indices in successive iterations of kf2 to 1
        all_folds[te2] = 1

    # Check that all indices are returned in the different test folds
    assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
    """With a fixed random_state, repeated split() calls are identical;
    without one, they must differ between calls."""
    # Check that when the shuffle is True multiple split calls produce the
    # same split when random_state is set
    X = np.ones(15)  # Divisible by 3
    y = [0] * 7 + [1] * 8
    X2 = np.ones(16)  # Not divisible by 3
    y2 = [0] * 8 + [1] * 8

    kf = KFold(3, shuffle=True, random_state=0)
    skf = StratifiedKFold(3, shuffle=True, random_state=0)

    for cv in (kf, skf):
        np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
        np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))

    # No random_state: two split() calls draw fresh shuffles and must not
    # coincide — the expected outcome here is the AssertionError branch.
    kf = KFold(3, shuffle=True)
    skf = StratifiedKFold(3, shuffle=True)

    for cv in (kf, skf):
        for data in zip((X, X2), (y, y2)):
            # Test if the two splits are different
            # numpy's assert_equal properly compares nested lists
            try:
                np.testing.assert_array_equal(list(cv.split(*data)),
                                              list(cv.split(*data)))
            except AssertionError:
                pass
            else:
                raise AssertionError("The splits for data, %s, are same even "
                                     "when random state is not set" % data)
def test_shuffle_stratifiedkfold():
    """Different seeds must yield different stratified test folds while
    preserving full sample coverage."""
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    X_40 = np.ones(40)
    y = [0] * 20 + [1] * 20
    kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
    kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
    for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
                                      kf1.split(X_40, y)):
        assert_not_equal(set(test0), set(test1))
    check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Shuffled KFold overestimates accuracy on author-grouped digits data;
    unshuffled KFold and StratifiedKFold should not."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.93) than that the non
    # shuffling variant (around 0.81).
    X, y = digits.data[:600], digits.target[:600]
    model = SVC(C=10, gamma=0.005)
    n_splits = 3
    cv = KFold(n_splits=n_splits, shuffle=False)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.92, mean_score)
    assert_greater(mean_score, 0.80)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = KFold(n_splits, shuffle=True, random_state=0)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.92)
    cv = KFold(n_splits, shuffle=True, random_state=1)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.92)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = StratifiedKFold(n_splits)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.93, mean_score)
    assert_greater(mean_score, 0.80)
def test_shuffle_split():
    """Float, int, numpy-int and six integer-type test_size specs must all
    produce identical ShuffleSplit folds."""
    ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
    ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
    ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
    # NOTE(review): only the last binding of ss4 (final entry of
    # six.integer_types) is compared below; earlier iterations are
    # discarded.  Presumably intended to cover Python 2's int/long —
    # verify that comparing only the last type is acceptable.
    for typ in six.integer_types:
        ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
    for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
        assert_array_equal(t1[0], t2[0])
        assert_array_equal(t2[0], t3[0])
        assert_array_equal(t3[0], t4[0])
        assert_array_equal(t1[1], t2[1])
        assert_array_equal(t2[1], t3[1])
        assert_array_equal(t3[1], t4[1])
@ignore_warnings
def test_stratified_shuffle_split_init():
    """StratifiedShuffleSplit must raise ValueError for infeasible class /
    train-size / test-size combinations (size errors surface at split())."""
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.2).split(X, y))
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 3, 2).split(X, y))
    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
    # Train size or test size too small
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(train_size=2).split(X, y))
    assert_raises(ValueError, next,
                  StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
    """Explicit integer train/test sizes must be honored for every split."""
    labels = np.array([0, 1, 2, 3] * 3 + [0, 1, 2])
    n_train, n_test = 10, 5
    splitter = StratifiedShuffleSplit(6, test_size=n_test,
                                      train_size=n_train, random_state=0)
    for train_idx, test_idx in splitter.split(np.ones(len(labels)), labels):
        assert_equal(len(train_idx), n_train)
        assert_equal(len(test_idx), n_test)
def test_stratified_shuffle_split_iter():
    """Across varied label vectors (arrays, lists, strings, imbalanced),
    StratifiedShuffleSplit must preserve class proportions, respect the
    computed train/test sizes, and keep train/test disjoint."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50),
          np.concatenate([[i] * (100 + i) for i in range(11)]),
          [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
          ['1', '1', '1', '1', '2', '2', '2', '3', '3', '3', '3', '3'],
          ]
    for y in ys:
        sss = StratifiedShuffleSplit(6, test_size=0.33,
                                     random_state=0).split(np.ones(len(y)), y)
        y = np.asanyarray(y)  # To make it indexable for y[train]
        # this is how test-size is computed internally
        # in _validate_shuffle_split
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Per-index train/test membership counts over many splits must be
    consistent with a binomial distribution (indices drawn evenly)."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_splits = 1000
    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            prob = bf.pmf(count)
            assert_true(prob > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")
    for n_samples in (6, 22):
        groups = np.array((n_samples // 2) * [0, 1])
        splits = StratifiedShuffleSplit(n_splits=n_splits,
                                        test_size=1. / n_folds,
                                        random_state=0)
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits_actual = 0
        for train, test in splits.split(X=np.ones(n_samples), y=groups):
            n_splits_actual += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits_actual, n_splits)
        n_train, n_test = _validate_shuffle_split(
            n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
        assert_equal(len(train), n_train)
        assert_equal(len(test), n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        group_counts = np.unique(groups)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(n_train + n_test, len(groups))
        assert_equal(len(group_counts), 2)
        ex_test_p = float(n_test) / n_samples
        ex_train_p = float(n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    """Regression test for scikit-learn issue #6121.

    Train and test indices must be disjoint and together cover every
    sample exactly once.
    """
    # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    # the original bug report
    labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
    features = np.ones_like(labels)
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.5,
                                      random_state=0)
    train_idx, test_idx = next(splitter.split(X=features, y=labels))
    # The two index sets must not share any sample ...
    assert_array_equal(np.intersect1d(train_idx, test_idx), [])
    # ... and must jointly cover the full range of sample indices.
    assert_array_equal(np.union1d(train_idx, test_idx),
                       np.arange(len(labels)))
def test_stratified_shuffle_split_multilabel():
    """2D multilabel y must be stratified by entire rows (issue #9037)."""
    # fix for issue 9037
    for y in [np.array([[0, 1], [1, 0], [1, 0], [0, 1]]),
              np.array([[0, 1], [1, 1], [1, 1], [0, 1]])]:
        X = np.ones_like(y)
        sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
        train, test = next(sss.split(X=X, y=y))
        y_train = y[train]
        y_test = y[test]
        # no overlap
        assert_array_equal(np.intersect1d(train, test), [])
        # complete partition
        assert_array_equal(np.union1d(train, test), np.arange(len(y)))
        # correct stratification of entire rows
        # (by design, here y[:, 0] uniquely determines the entire row of y)
        expected_ratio = np.mean(y[:, 0])
        assert_equal(expected_ratio, np.mean(y_train[:, 0]))
        assert_equal(expected_ratio, np.mean(y_test[:, 0]))
def test_stratified_shuffle_split_multilabel_many_labels():
    """Row-wise stratification must survive str(row) ellipsis truncation on
    very wide multilabel rows (regression for PR #9922)."""
    # fix in PR #9922: for multilabel data with > 1000 labels, str(row)
    # truncates with an ellipsis for elements in positions 4 through
    # len(row) - 4, so labels were not being correctly split using the powerset
    # method for transforming a multilabel problem to a multiclass one; this
    # test checks that this problem is fixed.
    row_with_many_zeros = [1, 0, 1] + [0] * 1000 + [1, 0, 1]
    row_with_many_ones = [1, 0, 1] + [1] * 1000 + [1, 0, 1]
    y = np.array([row_with_many_zeros] * 10 + [row_with_many_ones] * 100)
    X = np.ones_like(y)
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.5, random_state=0)
    train, test = next(sss.split(X=X, y=y))
    y_train = y[train]
    y_test = y[test]
    # correct stratification of entire rows
    # (by design, here y[:, 4] uniquely determines the entire row of y)
    expected_ratio = np.mean(y[:, 4])
    assert_equal(expected_ratio, np.mean(y_train[:, 4]))
    assert_equal(expected_ratio, np.mean(y_test[:, 4]))
def test_predefinedsplit_with_kfold_split():
    """PredefinedSplit built from KFold's test-fold labels must reproduce
    exactly the same train/test partitions as the original KFold."""
    # Check that PredefinedSplit can reproduce a split generated by Kfold.
    folds = -1 * np.ones(10)
    kf_train = []
    kf_test = []
    for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        folds[test_ind] = i
    ps_train = []
    ps_test = []
    ps = PredefinedSplit(folds)
    # n_splits is simply the no of unique folds
    assert_equal(len(np.unique(folds)), ps.get_n_splits())
    for train_ind, test_ind in ps.split():
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
    """GroupShuffleSplit: no group may straddle train/test, the partition is
    complete and disjoint, and group counts match requested sizes (+-1)."""
    for groups_i in test_groups:
        X = y = np.ones(len(groups_i))
        n_splits = 6
        test_size = 1. / 3
        slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
        # Make sure the repr works
        repr(slo)
        # Test that the length is correct
        assert_equal(slo.get_n_splits(X, y, groups=groups_i), n_splits)
        l_unique = np.unique(groups_i)
        l = np.asarray(groups_i)
        for train, test in slo.split(X, y, groups=groups_i):
            # First test: no train group is in the test set and vice versa
            l_train_unique = np.unique(l[train])
            l_test_unique = np.unique(l[test])
            assert_false(np.any(np.in1d(l[train], l_test_unique)))
            assert_false(np.any(np.in1d(l[test], l_train_unique)))
            # Second test: train and test add up to all the data
            assert_equal(l[train].size + l[test].size, l.size)
            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])
            # Fourth test:
            # unique train and test groups are correct, +- 1 for rounding error
            assert_true(abs(len(l_test_unique) -
                            round(test_size * len(l_unique))) <= 1)
            assert_true(abs(len(l_train_unique) -
                            round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_one_p_group_out():
    """Check LeaveOneGroupOut / LeavePGroupsOut splitting and get_n_splits.

    Bug fix: the "number of groups left out" check previously used
    ``assert_true(count, p_groups_out)``, which passes ``p_groups_out`` as
    the (ignored) failure message and only verifies the count is truthy.
    It now uses ``assert_equal`` so the count is actually compared.
    """
    logo = LeaveOneGroupOut()
    lpgo_1 = LeavePGroupsOut(n_groups=1)
    lpgo_2 = LeavePGroupsOut(n_groups=2)
    # Make sure the repr works
    assert_equal(repr(logo), 'LeaveOneGroupOut()')
    assert_equal(repr(lpgo_1), 'LeavePGroupsOut(n_groups=1)')
    assert_equal(repr(lpgo_2), 'LeavePGroupsOut(n_groups=2)')
    assert_equal(repr(LeavePGroupsOut(n_groups=3)),
                 'LeavePGroupsOut(n_groups=3)')
    for j, (cv, p_groups_out) in enumerate(((logo, 1), (lpgo_1, 1),
                                            (lpgo_2, 2))):
        for i, groups_i in enumerate(test_groups):
            n_groups = len(np.unique(groups_i))
            # leaving p of n groups out yields C(n, p) splits
            n_splits = (n_groups if p_groups_out == 1
                        else n_groups * (n_groups - 1) / 2)
            X = y = np.ones(len(groups_i))
            # Test that the length is correct
            assert_equal(cv.get_n_splits(X, y, groups=groups_i), n_splits)
            groups_arr = np.asarray(groups_i)
            # Split using the original list / array / list of string groups_i
            for train, test in cv.split(X, y, groups=groups_i):
                # First test: no train group is in the test set and vice versa
                assert_array_equal(np.intersect1d(groups_arr[train],
                                                  groups_arr[test]).tolist(),
                                   [])
                # Second test: train and test add up to all the data
                assert_equal(len(train) + len(test), len(groups_i))
                # Third test:
                # The number of groups in test must be equal to p_groups_out
                assert_equal(np.unique(groups_arr[test]).shape[0],
                             p_groups_out)
    # check get_n_splits() with dummy parameters
    assert_equal(logo.get_n_splits(None, None, ['a', 'b', 'c', 'b', 'c']), 3)
    assert_equal(logo.get_n_splits(groups=[1.0, 1.1, 1.0, 1.2]), 3)
    assert_equal(lpgo_2.get_n_splits(None, None, np.arange(4)), 6)
    assert_equal(lpgo_1.get_n_splits(groups=np.arange(4)), 4)
    # raise ValueError if a `groups` parameter is illegal
    with assert_raises(ValueError):
        logo.get_n_splits(None, None, [0.0, np.nan, 0.0])
    with assert_raises(ValueError):
        lpgo_2.get_n_splits(None, None, [0.0, np.inf, 0.0])
    msg = "The 'groups' parameter should not be None."
    assert_raise_message(ValueError, msg,
                         logo.get_n_splits, None, None, None)
    assert_raise_message(ValueError, msg,
                         lpgo_1.get_n_splits, None, None, None)
def test_leave_group_out_changing_groups():
    """LeaveOneGroupOut / LeavePGroupsOut vs. later mutation of groups.

    NOTE(review): ``groups_changing`` is zeroed below but never passed to
    ``split`` — every iterator is built from ``groups`` — so each pair
    compares two identical splits and the mutation is never exercised.
    Presumably the *_changing iterators were meant to receive
    ``groups_changing``; verify against upstream before changing, since
    ``split`` returns a lazy generator that would observe the zeroed array.
    """
    # Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
    # the groups variable is changed before calling split
    groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    X = np.ones(len(groups))
    groups_changing = np.array(groups, copy=True)
    lolo = LeaveOneGroupOut().split(X, groups=groups)
    lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
    lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
    lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
    groups_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
    # n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
    assert_equal(
        3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y=X,
                                                    groups=groups))
    # n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
    assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y=X,
                                                    groups=groups))
def test_leave_one_p_group_out_error_on_fewer_number_of_groups():
    """Group-based CV splitters must raise informative ValueErrors when
    there are too few samples or too few unique groups."""
    X = y = groups = np.ones(0)
    assert_raise_message(ValueError, "Found array with 0 sample(s)", next,
                         LeaveOneGroupOut().split(X, y, groups))
    X = y = groups = np.ones(1)
    msg = ("The groups parameter contains fewer than 2 unique groups ({}). "
           "LeaveOneGroupOut expects at least 2.").format(groups)
    assert_raise_message(ValueError, msg, next,
                         LeaveOneGroupOut().split(X, y, groups))
    X = y = groups = np.ones(1)
    msg = ("The groups parameter contains fewer than (or equal to) n_groups "
           "(3) numbers of unique groups ({}). LeavePGroupsOut expects "
           "that at least n_groups + 1 (4) unique groups "
           "be present").format(groups)
    assert_raise_message(ValueError, msg, next,
                         LeavePGroupsOut(n_groups=3).split(X, y, groups))
    X = y = groups = np.arange(3)
    msg = ("The groups parameter contains fewer than (or equal to) n_groups "
           "(3) numbers of unique groups ({}). LeavePGroupsOut expects "
           "that at least n_groups + 1 (4) unique groups "
           "be present").format(groups)
    assert_raise_message(ValueError, msg, next,
                         LeavePGroupsOut(n_groups=3).split(X, y, groups))
@ignore_warnings
def test_repeated_cv_value_errors():
    """Repeated CV splitters must reject non-positive / non-integer n_repeats."""
    bad_n_repeats = (0, 1.5)  # zero and a non-integer
    for splitter_cls in (RepeatedKFold, RepeatedStratifiedKFold):
        for bad in bad_n_repeats:
            assert_raises(ValueError, splitter_cls, n_repeats=bad)
def test_repeated_kfold_determinstic_split():
    """RepeatedKFold with a fixed seed must yield the same deterministic
    sequence of splits on every call, then raise StopIteration."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    random_state = 258173307
    rkf = RepeatedKFold(
        n_splits=2,
        n_repeats=2,
        random_state=random_state)
    # split should produce same and deterministic splits on
    # each call
    for _ in range(3):
        splits = rkf.split(X)
        train, test = next(splits)
        assert_array_equal(train, [2, 4])
        assert_array_equal(test, [0, 1, 3])
        train, test = next(splits)
        assert_array_equal(train, [0, 1, 3])
        assert_array_equal(test, [2, 4])
        train, test = next(splits)
        assert_array_equal(train, [0, 1])
        assert_array_equal(test, [2, 3, 4])
        train, test = next(splits)
        assert_array_equal(train, [2, 3, 4])
        assert_array_equal(test, [0, 1])
        # 2 splits x 2 repeats = 4 folds, then the generator is exhausted
        assert_raises(StopIteration, next, splits)
def test_get_n_splits_for_repeated_kfold():
    """RepeatedKFold.get_n_splits() must equal n_splits * n_repeats."""
    n_splits, n_repeats = 3, 4
    splitter = RepeatedKFold(n_splits, n_repeats)
    assert_equal(n_splits * n_repeats, splitter.get_n_splits())
def test_get_n_splits_for_repeated_stratified_kfold():
    """RepeatedStratifiedKFold.get_n_splits() must equal n_splits * n_repeats."""
    n_splits, n_repeats = 3, 4
    splitter = RepeatedStratifiedKFold(n_splits, n_repeats)
    assert_equal(n_splits * n_repeats, splitter.get_n_splits())
def test_repeated_stratified_kfold_determinstic_split():
    """RepeatedStratifiedKFold with a fixed seed must yield the same
    deterministic sequence of splits on every call."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    y = [1, 1, 1, 0, 0]
    random_state = 1944695409
    rskf = RepeatedStratifiedKFold(
        n_splits=2,
        n_repeats=2,
        random_state=random_state)
    # split should produce same and deterministic splits on
    # each call
    for _ in range(3):
        splits = rskf.split(X, y)
        train, test = next(splits)
        assert_array_equal(train, [1, 4])
        assert_array_equal(test, [0, 2, 3])
        train, test = next(splits)
        assert_array_equal(train, [0, 2, 3])
        assert_array_equal(test, [1, 4])
        train, test = next(splits)
        assert_array_equal(train, [2, 3])
        assert_array_equal(test, [0, 1, 4])
        train, test = next(splits)
        assert_array_equal(train, [0, 1, 4])
        assert_array_equal(test, [2, 3])
        # 2 splits x 2 repeats = 4 folds, then the generator is exhausted
        assert_raises(StopIteration, next, splits)
def test_train_test_split_errors():
    """train_test_split must reject invalid sizes, types, unknown keyword
    arguments and inconsistent input lengths."""
    assert_raises(ValueError, train_test_split)
    assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
    assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, train_test_split, range(3),
                  test_size="wrong_type")
    assert_raises(ValueError, train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, train_test_split, range(3),
                  some_argument=1.1)
    assert_raises(ValueError, train_test_split, range(3), range(42))
    assert_raises(ValueError, train_test_split, range(10),
                  shuffle=False, stratify=True)
def test_train_test_split():
    """End-to-end train_test_split checks: sparse input, list passthrough,
    nd-arrays, the stratify option, and unshuffled splitting."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # don't convert lists to anything else by default
    split = train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                       [2, 4, 2, 4, 6]):
        train, test = train_test_split(y, test_size=test_size,
                                       stratify=y,
                                       random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
    # test unshuffled split
    y = np.arange(10)
    for test_size in [2, 0.2]:
        train, test = train_test_split(y, shuffle=False, test_size=test_size)
        assert_array_equal(test, [8, 9])
        assert_array_equal(train, [0, 1, 2, 3, 4, 5, 6, 7])
@ignore_warnings
def train_test_split_pandas():
    """Check train_test_split preserves pandas DataFrame input types.

    NOTE(review): the name lacks the ``test_`` prefix, so test runners will
    not collect it — presumably unintentional; verify and rename upstream.
    """
    # check train_test_split doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_sparse():
    """Check train_test_split converts scipy sparse inputs to CSR.

    NOTE(review): the name lacks the ``test_`` prefix, so test runners will
    not collect it — presumably unintentional; verify and rename upstream.
    """
    # check that train_test_split converts scipy sparse matrices
    # to csr, as stated in the documentation
    X = np.arange(100).reshape((10, 10))
    sparse_types = [csr_matrix, csc_matrix, coo_matrix]
    for InputFeatureType in sparse_types:
        X_s = InputFeatureType(X)
        X_train, X_test = train_test_split(X_s)
        assert_true(isinstance(X_train, csr_matrix))
        assert_true(isinstance(X_test, csr_matrix))
def train_test_split_mock_pandas():
    """Check train_test_split passes MockDataFrame inputs through unchanged.

    NOTE(review): the name lacks the ``test_`` prefix (never collected by
    test runners), and the results of the second split below
    (X_train_arr / X_test_arr) are unused — verify intent upstream.
    """
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    X_train_arr, X_test_arr = train_test_split(X_df)
def train_test_split_list_input():
    """String-list, ndarray and float-list targets must split identically.

    NOTE(review): the name lacks the ``test_`` prefix, so test runners will
    not collect it — presumably unintentional; verify and rename upstream.
    """
    # Check that when y is a list / list of string labels, it works.
    X = np.ones(7)
    y1 = ['1'] * 4 + ['0'] * 3
    y2 = np.hstack((np.ones(4), np.zeros(3)))
    y3 = y2.tolist()
    for stratify in (True, False):
        X_train1, X_test1, y_train1, y_test1 = train_test_split(
            X, y1, stratify=y1 if stratify else None, random_state=0)
        X_train2, X_test2, y_train2, y_test2 = train_test_split(
            X, y2, stratify=y2 if stratify else None, random_state=0)
        X_train3, X_test3, y_train3, y_test3 = train_test_split(
            X, y3, stratify=y3 if stratify else None, random_state=0)
        np.testing.assert_equal(X_train1, X_train2)
        np.testing.assert_equal(y_train2, y_train3)
        np.testing.assert_equal(X_test1, X_test3)
        np.testing.assert_equal(y_test3, y_test2)
@ignore_warnings
def test_shufflesplit_errors():
    """ShuffleSplit must validate float sizes at init and integer sizes
    (which depend on len(X)) at split time."""
    # When the {test|train}_size is a float/invalid, error is raised at init
    assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
    assert_raises(ValueError, ShuffleSplit, test_size=2.0)
    assert_raises(ValueError, ShuffleSplit, test_size=1.0)
    assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
    assert_raises(ValueError, ShuffleSplit, train_size=1j)
    # When the {test|train}_size is an int, validation is based on the input X
    # and happens at split(...)
    assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
    assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
    assert_raises(ValueError, next, ShuffleSplit(test_size=8,
                                                 train_size=3).split(X))
def test_shufflesplit_reproducible():
    """Two passes over ShuffleSplit with a fixed seed yield identical folds."""
    splitter = ShuffleSplit(random_state=21)
    first_pass = [train for train, _ in splitter.split(X)]
    second_pass = [train for train, _ in splitter.split(X)]
    assert_array_equal(first_pass, second_pass)
def test_stratifiedshufflesplit_list_input():
    """String-list, ndarray and float-list targets must all split identically
    for StratifiedShuffleSplit with a fixed random_state."""
    splitter = StratifiedShuffleSplit(test_size=2, random_state=42)
    X = np.ones(7)
    as_str_list = ['1'] * 4 + ['0'] * 3
    as_array = np.hstack((np.ones(4), np.zeros(3)))
    as_float_list = as_array.tolist()
    # Fixed seed => the array-based split is a deterministic reference.
    reference = list(splitter.split(X, as_array))
    np.testing.assert_equal(list(splitter.split(X, as_str_list)), reference)
    np.testing.assert_equal(list(splitter.split(X, as_float_list)), reference)
def test_train_test_split_allow_nans():
    """train_test_split must accept feature matrices containing NaNs."""
    features = np.arange(200, dtype=np.float64).reshape(10, -1)
    features[2, :] = np.nan  # poison one full row with NaNs
    targets = np.repeat([0, 1], features.shape[0] / 2)
    # Must complete without raising despite the NaNs.
    train_test_split(features, targets, test_size=0.2, random_state=42)
def test_check_cv():
    """check_cv must pick KFold vs StratifiedKFold based on the target type
    and wrap legacy (pre-model_selection) CV objects with a split method."""
    X = np.ones(9)
    cv = check_cv(3, classifier=False)
    # Use numpy.testing.assert_equal which recursively compares
    # lists of lists
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = check_cv(3, y_binary, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
                            list(cv.split(X, y_binary)))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = check_cv(3, y_multiclass, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
                            list(cv.split(X, y_multiclass)))
    # also works with 2d multiclass
    y_multiclass_2d = y_multiclass.reshape(-1, 1)
    cv = check_cv(3, y_multiclass_2d, classifier=True)
    np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass_2d)),
                            list(cv.split(X, y_multiclass_2d)))
    # Stratified and plain KFold must actually disagree here.
    assert_false(np.all(
        next(StratifiedKFold(3).split(X, y_multiclass_2d))[0] ==
        next(KFold(3).split(X, y_multiclass_2d))[0]))
    X = np.ones(5)
    y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
                             [1, 1, 0, 1], [0, 0, 1, 0]])
    cv = check_cv(3, y_multilabel, classifier=True)
    # Multilabel / multioutput targets fall back to plain KFold.
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = check_cv(3, y_multioutput, classifier=True)
    np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
    # Check if the old style classes are wrapped to have a split method
    X = np.ones(9)
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv1 = check_cv(3, y_multiclass, classifier=True)
    with warnings.catch_warnings(record=True):
        from sklearn.cross_validation import StratifiedKFold as OldSKF
        cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
    np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
                            list(cv2.split()))
    assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
    """Old-style CV objects and plain iterables wrapped by check_cv must
    behave like splitters; pre-listed iterables must replay consistently."""
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    with warnings.catch_warnings(record=True):
        from sklearn.cross_validation import StratifiedKFold as OldSKF
        cv = OldSKF(y_multiclass, n_folds=3)
    wrapped_old_skf = _CVIterableWrapper(cv)
    # Check if split works correctly
    np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
    # Check if get_n_splits works correctly
    assert_equal(len(cv), wrapped_old_skf.get_n_splits())
    kf_iter = KFold(n_splits=5).split(X, y)
    kf_iter_wrapped = check_cv(kf_iter)
    # Since the wrapped iterable is enlisted and stored,
    # split can be called any number of times to produce
    # consistent results.
    np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                            list(kf_iter_wrapped.split(X, y)))
    # If the splits are randomized, successive calls to split yields different
    # results
    kf_randomized_iter = KFold(n_splits=5, shuffle=True).split(X, y)
    kf_randomized_iter_wrapped = check_cv(kf_randomized_iter)
    # numpy's assert_array_equal properly compares nested lists
    np.testing.assert_equal(list(kf_randomized_iter_wrapped.split(X, y)),
                            list(kf_randomized_iter_wrapped.split(X, y)))
    try:
        np.testing.assert_equal(list(kf_iter_wrapped.split(X, y)),
                                list(kf_randomized_iter_wrapped.split(X, y)))
        splits_are_equal = True
    except AssertionError:
        splits_are_equal = False
    assert_false(splits_are_equal, "If the splits are randomized, "
                 "successive calls to split should yield different results")
def test_group_kfold():
    """GroupKFold: approximately balanced fold sizes and strict group
    separation, for both integer and string group labels.

    Bug fix: removed a stray no-op statement (``len(np.unique(groups))``)
    whose result was discarded.
    """
    rng = np.random.RandomState(0)
    # Parameters of the test
    n_groups = 15
    n_samples = 1000
    n_splits = 5
    X = y = np.ones(n_samples)
    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    groups = rng.randint(0, n_groups, n_samples)
    ideal_n_groups_per_fold = n_samples // n_splits
    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    lkf = GroupKFold(n_splits=n_splits)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i
    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))
    # Check that each group appears only in 1 fold
    for group in np.unique(groups):
        assert_equal(len(np.unique(folds[groups == group])), 1)
    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
    # Construct the test data
    groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
                       'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
                       'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
                       'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
                       'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
                       'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
                       'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
    n_groups = len(np.unique(groups))
    n_samples = len(groups)
    n_splits = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    ideal_n_groups_per_fold = n_samples // n_splits
    X = y = np.ones(n_samples)
    # Get the test fold indices from the test set indices of each fold
    folds = np.zeros(n_samples)
    for i, (_, test) in enumerate(lkf.split(X, y, groups)):
        folds[test] = i
    # Check that folds have approximately the same size
    assert_equal(len(folds), len(groups))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_groups_per_fold))
    # Check that each group appears only in 1 fold
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        for group in np.unique(groups):
            assert_equal(len(np.unique(folds[groups == group])), 1)
    # Check that no group is on both sides of the split
    groups = np.asarray(groups, dtype=object)
    for train, test in lkf.split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
    # groups can also be a list
    cv_iter = list(lkf.split(X, y, groups.tolist()))
    for (train1, test1), (train2, test2) in zip(lkf.split(X, y, groups),
                                                cv_iter):
        assert_array_equal(train1, train2)
        assert_array_equal(test1, test2)
    # Should fail if there are more folds than groups
    groups = np.array([1, 1, 1, 2, 2])
    X = y = np.ones(len(groups))
    assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
                         next, GroupKFold(n_splits=3).split(X, y, groups))
def test_time_series_cv():
    """TimeSeriesSplit must keep chronological order: each training set is a
    prefix of the data and each test set the immediately following block."""
    X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
    # Should fail if there are more folds than samples
    assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
                         next,
                         TimeSeriesSplit(n_splits=7).split(X))
    tscv = TimeSeriesSplit(2)
    # Manually check that Time Series CV preserves the data
    # ordering on toy datasets
    splits = tscv.split(X[:-1])
    train, test = next(splits)
    assert_array_equal(train, [0, 1])
    assert_array_equal(test, [2, 3])
    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3])
    assert_array_equal(test, [4, 5])
    splits = TimeSeriesSplit(2).split(X)
    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2])
    assert_array_equal(test, [3, 4])
    train, test = next(splits)
    assert_array_equal(train, [0, 1, 2, 3, 4])
    assert_array_equal(test, [5, 6])
    # Check get_n_splits returns the correct number of splits
    splits = TimeSeriesSplit(2).split(X)
    n_splits_actual = len(list(splits))
    assert_equal(n_splits_actual, tscv.get_n_splits())
    assert_equal(n_splits_actual, 2)
def _check_time_series_max_train_size(splits, check_splits, max_train_size):
    """Compare capped TimeSeriesSplit folds against uncapped reference folds.

    Test sets must match exactly; each capped training set must be at most
    ``max_train_size`` long and equal to the tail of the reference set.
    """
    for (ref_train, ref_test), (capped_train, capped_test) in zip(
            splits, check_splits):
        assert_array_equal(ref_test, capped_test)
        assert_true(len(capped_train) <= max_train_size)
        tail_start = max(len(ref_train) - max_train_size, 0)
        assert_array_equal(capped_train, ref_train[tail_start:])
def test_time_series_max_train_size():
    """Check TimeSeriesSplit's max_train_size training-window cap.

    Bug fixes: (1) the reference split generator was created once and was
    exhausted by the first check, which made the later checks vacuous
    (zip over an exhausted generator compares nothing) — it is now
    re-created for every comparison; (2) the final check passed
    ``max_train_size=2`` although the splitter just above it was built
    with ``max_train_size=5``.
    """
    X = np.zeros((6, 1))
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=3).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=3)
    # Test for the case where the size of a fold is greater than max_train_size
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=2)
    # Test for the case where the size of each fold is less than max_train_size
    splits = TimeSeriesSplit(n_splits=3).split(X)
    check_splits = TimeSeriesSplit(n_splits=3, max_train_size=5).split(X)
    _check_time_series_max_train_size(splits, check_splits, max_train_size=5)
def test_nested_cv():
    """GridSearchCV nested inside cross_val_score must work with every
    pairing of group-aware and stratified CV splitters."""
    # Test if nested cross validation works with different combinations of cv
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 5, 15)
    cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
           StratifiedShuffleSplit(n_splits=3, random_state=0)]
    for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
        gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
                          cv=inner_cv)
        cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
                        fit_params={'groups': groups})
def test_train_test_default_warning():
    """Setting train_size while leaving test_size at its default must warn."""
    for splitter_cls in (ShuffleSplit, GroupShuffleSplit,
                         StratifiedShuffleSplit):
        assert_warns(FutureWarning, splitter_cls, train_size=0.75)
    assert_warns(FutureWarning, train_test_split, range(3), train_size=0.75)
def test_build_repr():
    # _build_repr must render the constructor signature with the current
    # attribute values, including defaulted parameters.
    class MockSplitter:
        def __init__(self, a, b=0, c=None):
            self.a, self.b, self.c = a, b, c

        def __repr__(self):
            return _build_repr(self)

    expected = "MockSplitter(a=5, b=6, c=None)"
    assert_equal(repr(MockSplitter(5, 6)), expected)
| bsd-3-clause |
raymond91125/TissueEnrichmentAnalysis | tea_paper_docs/src/hgf_benchmark_script.py | 4 | 14884 | # -*- coding: utf-8 -*-
"""
A script to benchmark TEA.
@david angeles
dangeles@caltech.edu
"""
import tissue_enrichment_analysis as tea # the library to be used
import pandas as pd
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import re
import matplotlib as mpl
sns.set_context('paper')
# pd.set_option('display.float_format', lambda x:'%f'%x)
pd.set_option('precision', 3)

# This script generates a few directories -- one results folder per
# annotation-cutoff variant of the anatomy dictionary, plus summary and
# comparison folders.
dirOutput = '../output/'
dirSummaries = '../output/SummaryInformation/'
dirHGT25_any = '../output/HGT25_any_Results/'
dirHGT33_any = '../output/HGT33_any_Results/'
dirHGT50_any = '../output/HGT50_any_Results/'
dirHGT100_any = '../output/HGT100_any_Results/'
dirComp = '../output/comparisons/'

# Bug fix: dirHGT33_any was missing from DIRS, so its folder was never
# created and the later ``walker(tissue_df, dirHGT33_any)`` call failed
# when writing 'empty.txt' into the nonexistent directory.
DIRS = [dirOutput, dirSummaries, dirHGT25_any, dirHGT33_any,
        dirHGT50_any, dirHGT100_any, dirComp]

# Input locations: gold-standard gene sets and the anatomy dictionaries.
path_sets = '../input/genesets_golden/'
path_dicts = '../input/WS252AnatomyDictionary/'

# Make all the necessary dirs if they don't already exist
for d in DIRS:
    if not os.path.exists(d):
        os.makedirs(d)

# Make the file that will hold the summaries and make the columns.
with open(dirSummaries+'ExecutiveSummary.csv', 'w') as fSum:
    fSum.write('#Summary of results from all benchmarks\n')
    fSum.write('NoAnnotations,Threshold,Method,EnrichmentSetUsed,TissuesTested,GenesSubmitted,TissuesReturned,GenesUsed,AvgFold,AvgQ,GenesInDict\n')

# ==============================================================================
# ==============================================================================
# # Perform the bulk of the analysis, run every single dictionary on every set
# ==============================================================================
# ==============================================================================
i = 0
# look in the dictionaries
for folder in os.walk(path_dicts):
    # open each one
    for f_dict in folder[2]:
        if f_dict == '.DS_Store':
            continue
        tissue_df = pd.read_csv(path_dicts+f_dict)
        # Parse the cutoff and threshold out of the dictionary filename.
        # tobedropped when tissue dictionary is corrected
        annot, thresh = re.findall(r"[-+]?\d*\.\d+|\d+", f_dict)
        annot = int(annot)
        thresh = float(thresh)  # typecasting
        method = f_dict[-7:-4]
        ntiss = len(tissue_df.columns)
        ngenes = tissue_df.shape[0]
        # open each enrichment set
        for fodder in os.walk(path_sets):
            for f_set in fodder[2]:
                df = pd.read_csv(path_sets + f_set)
                test = df.gene.values
                ntest = len(test)
                df_analysis, unused = tea.enrichment_analysis(test, tissue_df,
                                                              alpha=0.05,
                                                              show=False)
                # save the analysis to the relevant folder
                # Bug fix: ``annot`` is an int, so the original
                # ``'../output/HGT'+annot`` raised TypeError; format it
                # into the path instead.
                savepath = '../output/HGT{0}_{1}_Results/'.format(annot,
                                                                  method)
                df_analysis.to_csv(savepath + f_set+'.csv', index=False)
                # Bug fix: the save target was the literal string
                # 'savepath' rather than the variable, so graphs did not
                # land in the results folder.
                tea.plot_enrichment_results(df_analysis,
                                            save=savepath+f_set+'Graph',
                                            ftype='pdf')
                nana = len(df_analysis)  # len of results
                nun = len(unused)  # number of genes dropped
                avf = df_analysis['Enrichment Fold Change'].mean()
                avq = df_analysis['Q value'].mean()
                s = '{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'.format(
                    annot, thresh, method, f_set, ntiss, ntest, nana,
                    ntest-nun, avf, avq, ngenes)
                with open(dirSummaries+'ExecutiveSummary.csv', 'a+') as fSum:
                    fSum.write(s)
                    fSum.write('\n')
# Print summary to csv
df_summary = pd.read_csv(dirSummaries+'ExecutiveSummary.csv', comment='#')

# some entries contain nulls. before I remove them, I can inspect them
df_summary.isnull().any()
indexFold = df_summary['AvgFold'].index[df_summary['AvgFold'].apply(np.isnan)]
indexQ = df_summary['AvgQ'].index[df_summary['AvgQ'].apply(np.isnan)]
# NOTE(review): ``.ix`` is deprecated pandas indexing, and ``indexQ[5]``
# assumes at least six rows with NaN AvgQ -- confirm against the pinned
# pandas version and the benchmark data before rerunning.
df_summary.ix[indexFold[0]]
df_summary.ix[indexQ[5]]

# kill all nulls!
df_summary.dropna(inplace=True)

# calculate fraction of tissues that tested significant in each run
df_summary['fracTissues'] = df_summary['TissuesReturned']/df_summary[
    'TissuesTested']
df_summary.sort_values(['NoAnnotations', 'Threshold', 'Method'], inplace=True)

# ==============================================================================
# ==============================================================================
# # Plot summary graphs
# ==============================================================================
# ==============================================================================
# Row selector for a (annotation cutoff, threshold, method) combination.
sel = lambda x, y, z: ((df_summary.NoAnnotations == x) &
                       (df_summary.Threshold == y) & (df_summary.Method == z))

# KDE of the fraction of all tissues that tested significant
# one color per cutoff
cols = ['#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e']
ls = ['-', '--', ':']  # used with varying thresh
thresh = df_summary.Threshold.unique()
NoAnnotations = df_summary.NoAnnotations.unique()
def resplot(column, method='any'):
    """
    A method to quickly plot all combinations of cutoffs, thresholds.

    One KDE curve is drawn per (annotation cutoff, threshold) pair:
    all cutoffs share a color, all thresholds share a line style.

    Parameters:
    column -- the column of df_summary to plot
    method -- the similarity method used to select rows
    """
    for j, annots in enumerate(NoAnnotations):
        for i, threshold in enumerate(thresh):
            # threshold == 1 keeps every term; skip it as uninformative.
            if threshold == 1:
                continue
            s = sel(annots, threshold, method)
            # NOTE(review): Series.plot('kde') relies on the legacy pandas
            # API where ``kind`` is the first positional argument --
            # confirm the pinned pandas version still accepts this.
            df_summary[s][column].plot('kde', color=cols[j], ls=ls[i], lw=4,
                                       label='Annotation Cut-off: {0}, \
                                       Threshold: {1}'.format(annots,
                                                              threshold))
# KDE of the fraction of all tissues that tested significant.
resplot('fracTissues')
plt.xlabel('Fraction of all tissues that tested significant')
plt.xlim(0, 1)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'fractissuesKDE_method=any.pdf')
plt.close()

# Bug fix: this figure plots AvgQ, but its x-axis was mislabeled with the
# fraction-of-tissues caption copy-pasted from the figure above; it now
# matches the AvgQ label used for the 'any' method figure below.
resplot('AvgQ', method='avg')
plt.xlabel('AvgQ value')
plt.xlim(0, 0.05)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgQKDE_method=avg.pdf')
plt.close()

resplot('AvgQ')
plt.xlabel('AvgQ value')
plt.xlim(0, .05)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgQKDE_method=any.pdf')
plt.close()

# KDE of the fraction of avgFold
resplot('AvgFold')
plt.xlabel('Avg Fold Change value')
plt.xlim(0, 15)
plt.title('KDE Curves for all dictionaries, benchmarked on all gold standards')
plt.legend()
plt.savefig(dirSummaries+'avgFoldChangeKDE.pdf')
plt.close()
def line_prepender(filename, line):
    """Insert *line* at the very top of *filename*.

    Any trailing CR/LF on *line* is stripped first, so exactly one newline
    separates it from the original contents.
    """
    with open(filename, 'r+') as handle:
        original = handle.read()
        # Rewind and rewrite: header first, then the untouched body.
        handle.seek(0, 0)
        header = line.rstrip('\r\n') + '\n'
        handle.write(header + original)
# ==============================================================================
# ==============================================================================
# # Detailed analysis of 25 and 50 genes per node dictionaries
# ==============================================================================
# ==============================================================================
def walker(tissue_df, directory, save=True):
    """Run the enrichment analysis on every gold-standard gene set.

    Given the tissue dictionary and a directory to save to,
    open all the gene sets, analyze them and deposit the results in the
    specified directory.  Gene sets with no enrichment are listed in
    ``empty.txt`` inside that directory.

    Parameters:
    -------------------
    tissue_df - pandas dataframe containing specified tissue dictionary
    directory - where to save to
    save - boolean indicating whether to save results or not.
    """
    with open(directory+'empty.txt', 'w') as f:
        f.write('Genesets with no enrichment:\n')
    # go through each file in the folder
    for fodder in os.walk(path_sets):
        for f_set in fodder[2]:
            # open df
            df = pd.read_csv(path_sets + f_set)
            # extract gene list and analyze
            short_name = f_set
            test = df.gene.values
            df_analysis, unused = tea.enrichment_analysis(test, tissue_df,
                                                          show=False)
            # Bug fix: the original test ``df_analysis.empty is False & save``
            # binds ``&`` tighter than ``is``, which made it ignore ``save``
            # and write results even when save=False.
            if not df_analysis.empty and save:
                # save without index
                df_analysis.to_csv(directory+short_name+'.csv', index=False)
                # add the gene-set name as a comment line at the top
                line = '#' + short_name+'\n'
                line_prepender(directory+short_name+'.csv', line)
                # plot
                tea.plot_enrichment_results(df_analysis, title=short_name,
                                            dirGraphs=directory, ftype='pdf')
                plt.close()
            # if it's empty and you want to save, log it in empty.txt
            if df_analysis.empty and save:
                with open(directory+'empty.txt', 'a+') as f:
                    f.write(short_name+'\n')
def compare(resA, resB, l, r):
    """Outer-merge two tea result files on tissue name.

    Parameters:
    resA, resB -- filenames that store the dfs
    l, r -- suffixes to attach to the columns post merge

    Returns:
    result -- the outer merge of resA and resB, keeping the Q value and
    Enrichment Fold Change columns of both inputs, sorted by Q values.
    """
    left = pd.read_csv(resA, comment='#')
    right = pd.read_csv(resB, comment='#')
    # Raw observed counts are not comparable across runs; discard them and
    # duplicate the tissue name into a merge key on both sides.
    for frame in (left, right):
        frame.drop('Observed', axis=1, inplace=True)
        frame['key'] = frame['Tissue']
    merged = pd.merge(left, right, on='key', suffixes=[l, r], how='outer')
    # Collapse the two suffixed tissue columns back into a single one.
    merged.drop('Tissue{0}'.format(l), axis=1, inplace=True)
    merged.drop('Tissue{0}'.format(r), axis=1, inplace=True)
    merged['Tissue'] = merged['key']
    merged.drop('key', axis=1, inplace=True)
    # Most significant first (left file's Q value breaks ties first).
    merged.sort_values(['Q value%s' % (l), 'Q value%s' % (r)], inplace=True)
    merged.drop(['Expected%s' % (l), 'Expected%s' % (r)], axis=1, inplace=True)
    ordered = ['Tissue', 'Q value%s' % (l), 'Q value%s' % (r),
               'Enrichment Fold Change%s' % (l),
               'Enrichment Fold Change%s' % (r)]
    return merged[ordered]
# Run the full analysis for each annotation-cutoff dictionary variant.
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff25_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT25_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff50_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT50_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff100_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT100_any)
tissue_df = pd.read_csv('../input/WS252AnatomyDictionary/cutoff33_threshold0.95_methodany.csv')
walker(tissue_df, dirHGT33_any)

# Tabulate the number of terms per dictionary configuration.
grouped = df_summary.groupby(['NoAnnotations', 'Threshold', 'Method'])
with open('../doc/figures/TissueNumbers.csv', 'w') as f:
    f.write('Annotation Cutoff,Similarity Threshold,Method')
    f.write(',No. Of Terms in Dictionary\n')
    for key, group in grouped:
        f.write('{0},{1},{2},{3}\n'.format(key[0], key[1], key[2],
                                           group.TissuesTested.unique()[0]))

# NOTE(review): the table above is written to ../doc/figures/ but read back
# from ../output/SummaryInformation/ -- confirm a second copy exists there
# or that the two paths should match.
tissue_data = pd.read_csv('../output/SummaryInformation/TissueNumbers.csv')

# Row selector on the tissue-numbers table by (threshold, method) columns.
sel = lambda y, z: ((tissue_data.iloc[:, 1] == y) &
                    (tissue_data.iloc[:, 2] == z))

# KDE of the fraction of all tissues that tested significant
cols = ['#1b9e77', '#d95f02', '#7570b3']  # used with varying colors
thresh = df_summary.Threshold.unique()
NoAnnotations = df_summary.NoAnnotations.unique()

# def resplot(column, cutoff=25, method='any'):
#     """
#     A method to quickly plot all combinations of cutoffs, thresholds.
#     All cutoffs are same color
#     All Thresholds are same line style
#     """
#     for i, threshold in enumerate(thresh):
#         ax = plt.gca()
#         ax.grid(False)
#         if threshold == 1:
#             continue
#         tissue_data[sel(threshold, method)].plot(x='No. Of Annotations',
#                                                  y='No. Of Tissues in Dictionary',
#                                                  kind='scatter',
#                                                  color=cols[i],
#                                                  ax=ax, s=50, alpha=.7)
#         ax.set_xlim(20, 110)
#         ax.set_xscale('log')
#         ax.set_xticks([25, 33, 50, 100])
#         ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#
#         ax.set_ylim(25, 1000)
#         ax.set_yscale('log')
#         ax.set_yticks([50, 100, 250, 500])
#         ax.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
#
#
# resplot('No. Of Tissues in Dictionary')

# Compare GABAergic enrichment between two independent studies (same dict).
a = '../output/HGT33_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
b = '../output/HGT33_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
df = compare(a, b, 'Spencer', 'Watson')
df.to_csv('../output/comparisons/neuronal_comparison_33_WBPaper00024970_with_WBPaper0037950_complete.csv',
          index=False, na_rep='-', float_format='%.2g')

# Same study compared across the 33- and 50-annotation dictionaries.
a = '../output/HGT33_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
b = '../output/HGT50_any_Results/WBPaper00037950_GABAergic-motor-neurons_larva_enriched_WBbt_0005190_132.csv'
df = compare(a, b, '33', '50')
df.to_csv('../output/comparisons/neuronal_comparison_GABAergic_33-50_WBPaper0037950_complete.csv',
          index=False, na_rep='-', float_format='%.2g')

a = '../output/HGT33_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
b = '../output/HGT50_any_Results/WBPaper00024970_GABAergic_neuron_specific_WBbt_0005190_247.csv'
df = compare(a, b, '-33', '-50')
# print to figures
df.head(10).to_csv('../doc/figures/dict-comparison-50-33.csv', index=False,
                   na_rep='-', float_format='%.2g')
df.to_csv('../output/comparisons/neuronal_comparison_Pan_Neuronal_33-50_WBPaper0031532_complete.csv',
          index=False, na_rep='-', float_format='%.2g')
| mit |
ilyes14/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)

# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Mathieu Blondel <mathieu@mblondel.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler

digits = datasets.load_digits()

X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)

# classify small against large digits
# NOTE(review): ``np.int`` is a deprecated alias for the builtin ``int`` in
# recent numpy -- confirm the pinned numpy version still provides it.
y = (y > 4).astype(np.int)

# Set regularization parameter; larger C = weaker regularization.
for i, C in enumerate((100, 1, 0.01)):
    # turn down tolerance for short training time
    clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
    clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
    clf_l1_LR.fit(X, y)
    clf_l2_LR.fit(X, y)

    coef_l1_LR = clf_l1_LR.coef_.ravel()
    coef_l2_LR = clf_l2_LR.coef_.ravel()

    # coef_l1_LR contains zeros due to the
    # L1 sparsity inducing norm
    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100

    print("C=%.2f" % C)
    print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
    print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
    print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
    print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))

    # Figure layout: one row per C value; left column L1, right column L2.
    l1_plot = plt.subplot(3, 2, 2 * i + 1)
    l2_plot = plt.subplot(3, 2, 2 * (i + 1))
    if i == 0:
        l1_plot.set_title("L1 penalty")
        l2_plot.set_title("L2 penalty")

    # Show coefficient magnitudes on the 8x8 pixel grid of the digits.
    l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    plt.text(-8, 3, "C = %.2f" % C)

    l1_plot.set_xticks(())
    l1_plot.set_yticks(())
    l2_plot.set_xticks(())
    l2_plot.set_yticks(())

plt.show()
| bsd-3-clause |
arjoly/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
    # expit must stay numerically stable at extreme arguments, matching the
    # closed-form logistic 1 / (1 + exp(-x)) to 16 decimal places.
    # Reference implementation discussion:
    #http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
    for arg, expected in ((1000., 1. / (1. + np.exp(-1000.))),
                          (-1000., np.exp(-1000.) / (1. + np.exp(-1000.)))):
        assert_almost_equal(expit(arg), expected, decimal=16)

    # Writing into a preallocated float32 buffer must give the same values.
    values = np.arange(10)
    buffer = np.zeros_like(values, dtype=np.float32)
    assert_array_almost_equal(expit(values), expit(values, out=buffer))
def test_divide():
    # ``divide`` must return the exact float quotient for scalar inputs
    # (true division, even with an integer divisor).
    expected = .600000000000
    assert_equal(divide(.6, 1), expected)
def test_astype_copy_memory():
    ones_i32 = np.ones(3, np.int32)

    # Dtype conversion must yield the requested dtype ...
    converted = astype(ones_i32, dtype=np.float32, copy=False)
    assert_equal(converted.dtype, np.float32)
    # ... and a dtype change forces a copy even when copy=False.
    assert_false(np.may_share_memory(converted, ones_i32))

    # With a matching dtype and copy=False the input may be returned as-is.
    aliased = astype(ones_i32, dtype=np.int32, copy=False)
    assert_true(aliased is ones_i32)

    # copy=True always copies, and copying is the default behaviour.
    forced = astype(ones_i32, dtype=np.int32, copy=True)
    assert_false(np.may_share_memory(forced, ones_i32))
    default = astype(ones_i32, dtype=np.int32)
    assert_false(np.may_share_memory(default, ones_i32))
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
# Extra kwargs forwarded to scipy.linalg.solve_triangular: skipping the
# finiteness check is a speed optimization, but the keyword only exists in
# scipy >= 0.12; older versions get no extra arguments.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    # check_finite=False is an optimization available only in scipy >=0.12
    solve_triangular_args = {'check_finite': False}

# Warning text emitted when OMP stops before reaching the requested sparsity
# or precision because the remaining atoms are linearly dependent.
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
                  return_path=False):
    """Orthogonal Matching Pursuit step using the Cholesky decomposition.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input dictionary. Columns are assumed to have unit norm.

    y : array, shape (n_samples,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coef : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    if copy_X:
        X = X.copy('F')
    else:  # even if we are allowed to overwrite, still copy it if bad order
        X = np.asfortranarray(X)

    min_float = np.finfo(X.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
    potrs, = get_lapack_funcs(('potrs',), (X,))

    alpha = np.dot(X.T, y)
    residual = y
    gamma = np.empty(0)
    n_active = 0
    indices = np.arange(X.shape[1])  # keeping track of swapping

    max_features = X.shape[1] if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=X.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=X.dtype)

    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Greedy step: pick the atom most correlated with the residual.
        lam = np.argmax(np.abs(np.dot(X.T, residual)))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # atom already selected or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=2)
            break
        if n_active > 0:
            # Updates the Cholesky decomposition of X' X
            L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=2)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Move the newly selected atom into the leading "active" block;
        # ``indices`` records the permutation so results can be unscrambled.
        X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
        alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        residual = y - np.dot(X[:, :n_active], gamma)
        if tol is not None and nrm2(residual) ** 2 <= tol:
            break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
              copy_Gram=True, copy_Xy=True, return_path=False):
    """Orthogonal Matching Pursuit step on a precomputed Gram matrix.

    This function uses the the Cholesky decomposition method.

    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data matrix

    Xy : array, shape (n_features,)
        Input targets

    n_nonzero_coefs : int
        Targeted number of non-zero elements

    tol_0 : float
        Squared norm of y, required if tol is not None.

    tol : float
        Targeted squared error, if not None overrides n_nonzero_coefs.

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    Returns
    -------
    gamma : array, shape (n_nonzero_coefs,)
        Non-zero elements of the solution

    idx : array, shape (n_nonzero_coefs,)
        Indices of the positions of the elements in gamma within the solution
        vector

    coefs : array, shape (n_features, n_nonzero_coefs)
        The first k values of column k correspond to the coefficient value
        for the active features at that step. The lower left triangle contains
        garbage. Only returned if ``return_path=True``.

    n_active : int
        Number of active features at convergence.
    """
    Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)

    if copy_Xy:
        Xy = Xy.copy()

    min_float = np.finfo(Gram.dtype).eps
    nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
    potrs, = get_lapack_funcs(('potrs',), (Gram,))

    indices = np.arange(len(Gram))  # keeping track of swapping
    alpha = Xy
    tol_curr = tol_0
    delta = 0
    gamma = np.empty(0)
    n_active = 0

    max_features = len(Gram) if tol is not None else n_nonzero_coefs
    if solve_triangular_args:
        # new scipy, don't need to initialize because check_finite=False
        L = np.empty((max_features, max_features), dtype=Gram.dtype)
    else:
        # old scipy, we need the garbage upper triangle to be non-Inf
        L = np.zeros((max_features, max_features), dtype=Gram.dtype)

    L[0, 0] = 1.
    if return_path:
        coefs = np.empty_like(L)

    while True:
        # Greedy step: pick the atom with the largest current correlation.
        lam = np.argmax(np.abs(alpha))
        if lam < n_active or alpha[lam] ** 2 < min_float:
            # selected same atom twice, or inner product too small
            warnings.warn(premature, RuntimeWarning, stacklevel=3)
            break
        if n_active > 0:
            # Update the Cholesky factor L of the active block of Gram.
            L[n_active, :n_active] = Gram[lam, :n_active]
            linalg.solve_triangular(L[:n_active, :n_active],
                                    L[n_active, :n_active],
                                    trans=0, lower=1,
                                    overwrite_b=True,
                                    **solve_triangular_args)
            v = nrm2(L[n_active, :n_active]) ** 2
            if 1 - v <= min_float:  # selected atoms are dependent
                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Symmetric row+column swap keeps the active atoms in the leading
        # block of Gram; ``indices`` records the permutation.
        Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
        Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
        indices[n_active], indices[lam] = indices[lam], indices[n_active]
        Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
        n_active += 1
        # solves LL'x = y as a composition of two triangular systems
        gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
                         overwrite_b=False)
        if return_path:
            coefs[:n_active, n_active - 1] = gamma
        beta = np.dot(Gram[:, :n_active], gamma)
        alpha = Xy - beta
        if tol is not None:
            # Incrementally track the residual energy instead of recomputing.
            tol_curr += delta
            delta = np.inner(gamma, beta[:n_active])
            tol_curr -= delta
            if abs(tol_curr) <= tol:
                break
        elif n_active == max_features:
            break

    if return_path:
        return gamma, indices[:n_active], coefs[:, :n_active], n_active
    else:
        return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
                  copy_X=True, return_path=False,
                  return_n_iter=False):
    """Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems.
    An instance of the problem has the form:

    When parametrized by the number of non-zero coefficients using
    `n_nonzero_coefs`:
    argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}

    When parametrized by error using the parameter `tol`:
    argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input data. Columns are assumed to have unit norm.

    y : array, shape (n_samples,) or (n_samples, n_targets)
        Input targets

    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    precompute : {True, False, 'auto'},
        Whether to perform precomputations. Improves performance when n_targets
        or n_samples is very large.

    copy_X : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp_gram
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    """
    X = check_array(X, order='F', copy=copy_X)
    copy_X = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    y = check_array(y)
    if y.shape[1] > 1:  # subsequent targets will be affected
        copy_X = True
    if n_nonzero_coefs is None and tol is None:
        # default for n_nonzero_coefs is 0.1 * n_features
        # but at least one.
        n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > X.shape[1]:
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")
    if precompute == 'auto':
        precompute = X.shape[0] > X.shape[1]
    if precompute:
        # Delegate to the Gram variant with G = X'X and Xy = X'y.
        G = np.dot(X.T, X)
        G = np.asfortranarray(G)
        Xy = np.dot(X.T, y)
        if tol is not None:
            norms_squared = np.sum((y ** 2), axis=0)
        else:
            norms_squared = None
        # NOTE(review): ``return_n_iter`` is not forwarded here, so the
        # precomputed path never returns n_iters even when
        # return_n_iter=True -- confirm whether this is intentional.
        return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
                                  copy_Gram=copy_X, copy_Xy=False,
                                  return_path=return_path)

    if return_path:
        coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
    else:
        coef = np.zeros((X.shape[1], y.shape[1]))
    n_iters = []

    # Solve one OMP problem per target column.
    for k in range(y.shape[1]):
        out = _cholesky_omp(
            X, y[:, k], n_nonzero_coefs, tol,
            copy_X=copy_X, return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, :len(idx)]
            # Scatter each step of the forward path back to the original
            # (unswapped) feature positions.
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)

    if y.shape[1] == 1:
        n_iters = n_iters[0]

    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
                       norms_squared=None, copy_Gram=True,
                       copy_Xy=True, return_path=False,
                       return_n_iter=False):
    """Gram Orthogonal Matching Pursuit (OMP)

    Solves n_targets Orthogonal Matching Pursuit problems using only
    the Gram matrix X.T * X and the product X.T * y.

    Read more in the :ref:`User Guide <omp>`.

    Parameters
    ----------
    Gram : array, shape (n_features, n_features)
        Gram matrix of the input data: X.T * X

    Xy : array, shape (n_features,) or (n_features, n_targets)
        Input targets multiplied by X: X.T * y

    n_nonzero_coefs : int
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the lines of y. Required if tol is not None.

    copy_Gram : bool, optional
        Whether the gram matrix must be copied by the algorithm. A false
        value is only helpful if it is already Fortran-ordered, otherwise a
        copy is made anyway.

    copy_Xy : bool, optional
        Whether the covariance vector Xy must be copied by the algorithm.
        If False, it may be overwritten.

    return_path : bool, optional. Default: False
        Whether to return every value of the nonzero coefficients along the
        forward path. Useful for cross-validation.

    return_n_iter : bool, optional default False
        Whether or not to return the number of iterations.

    Returns
    -------
    coef : array, shape (n_features,) or (n_features, n_targets)
        Coefficients of the OMP solution. If `return_path=True`, this contains
        the whole coefficient path. In this case its shape is
        (n_features, n_features) or (n_features, n_targets, n_features) and
        iterating over the last axis yields coefficients in increasing order
        of active features.

    n_iters : array-like or int
        Number of active features across every target. Returned only if
        `return_n_iter` is set to True.

    See also
    --------
    OrthogonalMatchingPursuit
    orthogonal_mp
    lars_path
    decomposition.sparse_encode

    Notes
    -----
    Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
    Matching pursuits with time-frequency dictionaries, IEEE Transactions on
    Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
    (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)

    This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
    M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
    Matching Pursuit Technical Report - CS Technion, April 2008.
    http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf

    """
    Gram = check_array(Gram, order='F', copy=copy_Gram)
    Xy = np.asarray(Xy)
    if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent target will be affected
        copy_Gram = True
    if Xy.ndim == 1:
        # Promote the single-target case to the 2-D layout used below.
        Xy = Xy[:, np.newaxis]
        if tol is not None:
            norms_squared = [norms_squared]

    if n_nonzero_coefs is None and tol is None:
        n_nonzero_coefs = int(0.1 * len(Gram))
    if tol is not None and norms_squared is None:
        raise ValueError('Gram OMP needs the precomputed norms in order '
                         'to evaluate the error sum of squares.')
    if tol is not None and tol < 0:
        raise ValueError("Epsilon cannot be negative")
    if tol is None and n_nonzero_coefs <= 0:
        raise ValueError("The number of atoms must be positive")
    if tol is None and n_nonzero_coefs > len(Gram):
        raise ValueError("The number of atoms cannot be more than the number "
                         "of features")

    if return_path:
        coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
    else:
        coef = np.zeros((len(Gram), Xy.shape[1]))

    n_iters = []
    # Solve one Gram-OMP problem per target column.
    for k in range(Xy.shape[1]):
        out = _gram_omp(
            Gram, Xy[:, k], n_nonzero_coefs,
            norms_squared[k] if tol is not None else None, tol,
            copy_Gram=copy_Gram, copy_Xy=copy_Xy,
            return_path=return_path)
        if return_path:
            _, idx, coefs, n_iter = out
            coef = coef[:, :, :len(idx)]
            # Scatter each step of the forward path back to the original
            # (unswapped) feature positions.
            for n_active, x in enumerate(coefs.T):
                coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
        else:
            x, idx, n_iter = out
            coef[idx, k] = x
        n_iters.append(n_iter)

    if Xy.shape[1] == 1:
        n_iters = n_iters[0]

    if return_n_iter:
        return np.squeeze(coef), n_iters
    else:
        return np.squeeze(coef)
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
    """Orthogonal Matching Pursuit model (OMP).

    Greedily selects atoms (features) one at a time until either the
    requested number of non-zero coefficients is reached or the residual
    norm drops below ``tol``.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Desired number of non-zero entries in the solution. If None (by
        default) this value is set to 10% of n_features.

    tol : float, optional
        Maximum norm of the residual. If not None, overrides n_nonzero_coefs.

    fit_intercept : boolean, optional
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If False, the regressors X are assumed to be already normalized.

    precompute : {True, False, 'auto'}, default 'auto'
        Whether to use a precomputed Gram and Xy matrix to speed up
        calculations. Improves performance when `n_targets` or `n_samples` is
        very large.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_features, n_targets)
        Parameter vector (w in the formula).

    intercept_ : float or array, shape (n_targets,)
        Independent term in the decision function.

    n_iter_ : int or array-like
        Number of active features across every target.

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    decomposition.sparse_encode
    """
    def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
                 normalize=True, precompute='auto'):
        self.n_nonzero_coefs = n_nonzero_coefs
        self.tol = tol
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
        n_features = X.shape[1]
        # Centering/normalization; Gram/Xy are precomputed when requested.
        X, y, X_mean, y_mean, X_std, Gram, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=True)
        y = y[:, np.newaxis] if y.ndim == 1 else y
        if self.n_nonzero_coefs is None and self.tol is None:
            # Default sparsity: 10% of the features, but at least one atom.
            self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
        else:
            self.n_nonzero_coefs_ = self.n_nonzero_coefs
        if Gram is False:
            # No precomputed Gram: run OMP directly on the design matrix.
            solution, self.n_iter_ = orthogonal_mp(
                X, y, self.n_nonzero_coefs_, self.tol,
                precompute=False, copy_X=True, return_n_iter=True)
        else:
            # Gram path needs the squared target norms only when a residual
            # tolerance is used as the stopping criterion.
            norms_sq = None
            if self.tol is not None:
                norms_sq = np.sum(y ** 2, axis=0)
            solution, self.n_iter_ = orthogonal_mp_gram(
                Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
                tol=self.tol, norms_squared=norms_sq,
                copy_Gram=True, copy_Xy=True, return_n_iter=True)
        self.coef_ = solution.T
        self._set_intercept(X_mean, y_mean, X_std)
        return self
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
                       fit_intercept=True, normalize=True, max_iter=100):
    """Compute the residues on left-out data for a full OMP path.

    Parameters
    ----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the OMP on.

    y_train : array, shape (n_samples)
        The target variable to fit OMP on.

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on.

    y_test : array, shape (n_samples)
        The target variable to compute the residues on.

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied. If
        False, they may be overwritten.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 100 by default.

    Returns
    -------
    residues : array, shape (n_samples, max_features)
        Residues of the prediction on the test data.
    """
    # Copy up front because the centering/normalization below is in place.
    if copy:
        X_train = X_train.copy()
        y_train = y_train.copy()
        X_test = X_test.copy()
        y_test = y_test.copy()
    if fit_intercept:
        # Center both splits with the *training* means so test residues are
        # computed in the same frame the model was fit in.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        # as_float_array before the in-place subtraction: integer targets
        # would otherwise truncate the centered values.
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean
    if normalize:
        # Scale each training column to unit L2 norm; all-zero columns are
        # skipped to avoid division by zero.
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]
    # return_path=True yields one coefficient vector per iteration count.
    coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
                          precompute=False, copy_X=False,
                          return_path=True)
    if coefs.ndim == 1:
        coefs = coefs[:, np.newaxis]
    if normalize:
        # Undo the column scaling so coefficients apply to the raw X_test.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    # One row of residues per path step.
    return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
    """Cross-validated Orthogonal Matching Pursuit model (OMP).

    Selects the number of non-zero coefficients by minimizing the mean
    squared error of OMP paths across cross-validation folds, then refits
    a single :class:`OrthogonalMatchingPursuit` on the full data.

    Parameters
    ----------
    copy : bool, optional
        Whether the design matrix X must be copied by the algorithm. A false
        value is only helpful if X is already Fortran-ordered, otherwise a
        copy is made anyway.

    fit_intercept : boolean, optional
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional
        If False, the regressors X are assumed to be already normalized.

    max_iter : integer, optional
        Maximum numbers of iterations to perform, therefore maximum features
        to include. 10% of ``n_features`` but at least 5 if available.

    cv : cross-validation generator, optional
        See :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    verbose : boolean or integer, optional
        Sets the verbosity amount.

    Attributes
    ----------
    intercept_ : float or array, shape (n_targets,)
        Independent term in decision function.

    coef_ : array, shape (n_features,) or (n_features, n_targets)
        Parameter vector (w in the problem formulation).

    n_nonzero_coefs_ : int
        Estimated number of non-zero coefficients giving the best mean squared
        error over the cross-validation folds.

    n_iter_ : int or array-like
        Number of active features across every target for the model refit with
        the best hyperparameters got by cross-validating across all folds.

    See also
    --------
    orthogonal_mp
    orthogonal_mp_gram
    lars_path
    Lars
    LassoLars
    OrthogonalMatchingPursuit
    LarsCV
    LassoLarsCV
    decomposition.sparse_encode
    """
    def __init__(self, copy=True, fit_intercept=True, normalize=True,
                 max_iter=None, cv=None, n_jobs=1, verbose=False):
        self.copy = copy
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.cv = cv
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Training data.

        y : array-like, shape [n_samples]
            Target values.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True)
        X = as_float_array(X, copy=False, force_all_finite=False)
        cv = check_cv(self.cv, X, y, classifier=False)
        # Default path length: 10% of the features, at least 5, and never
        # more than n_features.
        max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
                    if not self.max_iter
                    else self.max_iter)
        # One residue path per CV fold, computed in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_omp_path_residues)(
                X[train], y[train], X[test], y[test], self.copy,
                self.fit_intercept, self.normalize, max_iter)
            for train, test in cv)
        # Folds may stop early at different path lengths; only compare the
        # steps that every fold reached.
        min_early_stop = min(fold.shape[0] for fold in cv_paths)
        mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
                              for fold in cv_paths])
        # +1 because path index 0 corresponds to one non-zero coefficient.
        best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
        self.n_nonzero_coefs_ = best_n_nonzero_coefs
        # Refit on the full data with the selected sparsity level.
        omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
                                        fit_intercept=self.fit_intercept,
                                        normalize=self.normalize)
        omp.fit(X, y)
        self.coef_ = omp.coef_
        self.intercept_ = omp.intercept_
        self.n_iter_ = omp.n_iter_
        return self
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/tests/test_coding_standards.py | 7 | 12216 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from fnmatch import fnmatch
import os
from nose.tools import assert_equal
from nose.plugins.skip import SkipTest
from matplotlib.testing.noseclasses import KnownFailureTest
try:
import pep8
except ImportError:
HAS_PEP8 = False
else:
HAS_PEP8 = pep8.__version__ > '1.4.5'
import matplotlib
PEP8_ADDITIONAL_IGNORE = ['E111',
'E114',
'E115',
'E116',
'E121',
'E122',
'E123',
'E124',
'E125',
'E126',
'E127',
'E128',
'E129',
'E131',
'E265',
'E266',
'W503']
EXTRA_EXCLUDE_FILE = os.path.join(os.path.dirname(__file__),
'.pep8_test_exclude.txt')
if HAS_PEP8:
    class StandardReportWithExclusions(pep8.StandardReport):
        """A pep8 report that suppresses errors for known-bad files.

        Files matching one of ``expected_bad_files`` have their recorded
        errors rolled back; all other failures are accumulated (as text)
        in ``_global_deferred_print`` for a single summary message.
        """
        #: A class attribute to store the exception exclusion file patterns.
        expected_bad_files = []
        #: A class attribute to store the lines of failing tests.
        _global_deferred_print = []
        #: A class attribute to store patterns which have seen exceptions.
        matched_exclusions = set()

        def get_file_results(self):
            # If the file had no errors, return self.file_errors
            # (which will be 0).
            if not self._deferred_print:
                return self.file_errors
            # Iterate over all of the patterns, to find a possible exclusion.
            # If the filename is to be excluded, go ahead and remove the
            # counts that self.error added.
            for pattern in self.expected_bad_files:
                if fnmatch(self.filename, pattern):
                    self.matched_exclusions.add(pattern)
                    # invert the error method's counters.
                    for _, _, code, _, _ in self._deferred_print:
                        self.counters[code] -= 1
                        if self.counters[code] == 0:
                            self.counters.pop(code)
                            self.messages.pop(code)
                        self.file_errors -= 1
                        self.total_errors -= 1
                    return self.file_errors
            # mirror the content of StandardReport, only storing the output to
            # file rather than printing. This could be a feature request for
            # the PEP8 tool.
            self._deferred_print.sort()
            for line_number, offset, code, text, _ in self._deferred_print:
                self._global_deferred_print.append(
                    self._fmt % {'path': self.filename,
                                 'row': self.line_offset + line_number,
                                 'col': offset + 1, 'code': code,
                                 'text': text})
            return self.file_errors
def assert_pep8_conformance(module=matplotlib, exclude_files=None,
                            extra_exclude_file=EXTRA_EXCLUDE_FILE,
                            pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE,
                            dirname=None, expected_bad_files=None,
                            extra_exclude_directories=None):
    """
    Tests the matplotlib codebase against the "pep8" tool.

    Users can add their own excluded files (should files exist in the
    local directory which is not in the repository) by adding a
    ".pep8_test_exclude.txt" file in the same directory as this test.
    The file should be a line separated list of filenames/directories
    as can be passed to the "pep8" tool's exclude list.
    """
    if not HAS_PEP8:
        raise SkipTest('The pep8 tool is required for this test')

    # The exclusion-aware reporter records failures instead of printing them;
    # to get a list of bad files instead, pass reporter=pep8.FileReport.
    style_guide = pep8.StyleGuide(quiet=False,
                                  reporter=StandardReportWithExclusions)
    reporter = style_guide.options.reporter
    if expected_bad_files is not None:
        reporter.expected_bad_files = expected_bad_files

    # Extend the set of PEP8 guidelines which are not checked.
    style_guide.options.ignore = (style_guide.options.ignore +
                                  tuple(pep8_additional_ignore))

    # Exclusions for egg shared object wrappers etc., which are neither PEP8
    # compliant nor part of the matplotlib repository.
    # DO NOT ADD FILES *IN* THE REPOSITORY TO THIS LIST.
    if exclude_files is not None:
        style_guide.options.exclude.extend(exclude_files)

    # Allow users to supply their own exclude list via a local file.
    if extra_exclude_file is not None and os.path.exists(extra_exclude_file):
        with open(extra_exclude_file, 'r') as fh:
            style_guide.options.exclude.extend(
                line.strip() for line in fh if line.strip())
    if extra_exclude_directories:
        style_guide.options.exclude.extend(extra_exclude_directories)

    if dirname is None:
        dirname = os.path.dirname(module.__file__)
    result = style_guide.check_files([dirname])

    if reporter is StandardReportWithExclusions:
        msg = ("Found code syntax errors (and warnings):\n"
               "{0}".format('\n'.join(reporter._global_deferred_print)))
    else:
        msg = "Found code syntax errors (and warnings)."
    assert_equal(result.total_errors, 0, msg)

    # If the exclusions reporter was used, verify that no exclude pattern
    # was left unused.
    if reporter is StandardReportWithExclusions:
        unexpectedly_good = sorted(set(reporter.expected_bad_files) -
                                   reporter.matched_exclusions)
        if unexpectedly_good:
            raise ValueError('Some exclude patterns were unnecessary as the '
                             'files they pointed to either passed the PEP8 '
                             'tests or do not point to a file:\n '
                             '{0}'.format('\n '.join(unexpectedly_good)))
def test_pep8_conformance_installed_files():
    """Run the pep8 check over the installed matplotlib package.

    ``exclude_files`` are compiled-extension wrappers that are skipped
    entirely; ``expected_bad_files`` are known-noncompliant repository files
    whose errors are tolerated (see StandardReportWithExclusions).
    """
    exclude_files = ['_delaunay.py',
                     '_image.py',
                     '_tri.py',
                     '_backend_agg.py',
                     '_tkagg.py',
                     'ft2font.py',
                     '_cntr.py',
                     '_contour.py',
                     '_png.py',
                     '_path.py',
                     'ttconv.py',
                     '_gtkagg.py',
                     '_backend_gdk.py',
                     'pyparsing*',
                     '_qhull.py',
                     '_macosx.py']
    expected_bad_files = ['_cm.py',
                          '_mathtext_data.py',
                          'backend_bases.py',
                          'cbook.py',
                          'collections.py',
                          'dviread.py',
                          'font_manager.py',
                          'fontconfig_pattern.py',
                          'gridspec.py',
                          'legend_handler.py',
                          'mathtext.py',
                          'patheffects.py',
                          'pylab.py',
                          'pyplot.py',
                          'rcsetup.py',
                          'stackplot.py',
                          'texmanager.py',
                          'transforms.py',
                          'type1font.py',
                          'widgets.py',
                          'testing/decorators.py',
                          'testing/jpl_units/Duration.py',
                          'testing/jpl_units/Epoch.py',
                          'testing/jpl_units/EpochConverter.py',
                          'testing/jpl_units/StrConverter.py',
                          'testing/jpl_units/UnitDbl.py',
                          'testing/jpl_units/UnitDblConverter.py',
                          'testing/jpl_units/UnitDblFormatter.py',
                          'testing/jpl_units/__init__.py',
                          'tri/triinterpolate.py',
                          'tests/test_axes.py',
                          'tests/test_bbox_tight.py',
                          'tests/test_delaunay.py',
                          'tests/test_dviread.py',
                          'tests/test_image.py',
                          'tests/test_legend.py',
                          'tests/test_lines.py',
                          'tests/test_mathtext.py',
                          'tests/test_rcparams.py',
                          'tests/test_simplification.py',
                          'tests/test_streamplot.py',
                          'tests/test_subplots.py',
                          'tests/test_tightlayout.py',
                          'tests/test_triangulation.py',
                          'compat/subprocess.py',
                          'backends/__init__.py',
                          'backends/backend_agg.py',
                          'backends/backend_cairo.py',
                          'backends/backend_cocoaagg.py',
                          'backends/backend_gdk.py',
                          'backends/backend_gtk.py',
                          'backends/backend_gtk3.py',
                          'backends/backend_gtk3cairo.py',
                          'backends/backend_gtkagg.py',
                          'backends/backend_gtkcairo.py',
                          'backends/backend_macosx.py',
                          'backends/backend_mixed.py',
                          'backends/backend_pgf.py',
                          'backends/backend_ps.py',
                          'backends/backend_svg.py',
                          'backends/backend_template.py',
                          'backends/backend_tkagg.py',
                          'backends/tkagg.py',
                          'backends/windowing.py',
                          'backends/qt_editor/formlayout.py',
                          'sphinxext/mathmpl.py',
                          'sphinxext/only_directives.py',
                          'sphinxext/plot_directive.py',
                          'projections/__init__.py',
                          'projections/geo.py',
                          'projections/polar.py',
                          'externals/six.py']
    # Anchor the patterns at the package directory so they only match
    # files inside matplotlib itself.
    expected_bad_files = ['*/matplotlib/' + s for s in expected_bad_files]
    assert_pep8_conformance(module=matplotlib,
                            exclude_files=exclude_files,
                            expected_bad_files=expected_bad_files)
def test_pep8_conformance_examples():
    """Run the pep8 check over the examples/ directory of a source tree.

    The tree is located via the MPL_REPO_DIR environment variable, or, if
    unset, by walking up from the current working directory.
    """
    mpldir = os.environ.get('MPL_REPO_DIR', None)
    if mpldir is None:
        # try and guess!
        fp = os.getcwd()
        while len(fp) > 2:
            if os.path.isdir(os.path.join(fp, 'examples')):
                mpldir = fp
                break
            fp, tail = os.path.split(fp)
    if mpldir is None:
        raise KnownFailureTest("can not find the examples, set env "
                               "MPL_REPO_DIR to point to the top-level path "
                               "of the source tree")
    exdir = os.path.join(mpldir, 'examples')
    blacklist = ()
    expected_bad_files = ['*/pylab_examples/table_demo.py',
                          '*/pylab_examples/tricontour_demo.py',
                          '*/pylab_examples/tripcolor_demo.py',
                          '*/pylab_examples/triplot_demo.py',
                          '*/shapes_and_collections/artist_reference.py']
    # Examples are allowed longer lines and module-level code after imports.
    assert_pep8_conformance(dirname=exdir,
                            extra_exclude_directories=blacklist,
                            pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE +
                            ['E116', 'E501', 'E402'],
                            expected_bad_files=expected_bad_files)
# Allow running this test module directly via nose.
if __name__ == '__main__':
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
hvanhovell/spark | python/pyspark/sql/tests/test_arrow.py | 8 | 21337 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from pyspark import SparkContext, SparkConf
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
from pyspark.util import _exception_message
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
    """Tests for Arrow-accelerated conversion between Spark and pandas.

    Covers ``DataFrame.toPandas`` and ``SparkSession.createDataFrame`` from a
    pandas DataFrame, with Arrow enabled, comparing results against the
    non-Arrow code path and exercising fallback/timezone behavior.
    """

    @classmethod
    def setUpClass(cls):
        from datetime import date, datetime
        from decimal import Decimal
        super(ArrowTests, cls).setUpClass()
        cls.warnings_lock = threading.Lock()
        # Synchronize default timezone between Python and Java
        cls.tz_prev = os.environ.get("TZ", None)  # save current tz if set
        tz = "America/Los_Angeles"
        os.environ["TZ"] = tz
        time.tzset()
        cls.spark.conf.set("spark.sql.session.timeZone", tz)
        # Test fallback: the legacy config names must alias the new
        # "...pyspark..." config keys in both directions.
        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false"
        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true"
        cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true"
        cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
        assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false"
        # Enable Arrow optimization in this tests.
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
        # Disable fallback by default to easily detect the failures.
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
        # Shared schema/data covering every Arrow-supported primitive type.
        cls.schema = StructType([
            StructField("1_str_t", StringType(), True),
            StructField("2_int_t", IntegerType(), True),
            StructField("3_long_t", LongType(), True),
            StructField("4_float_t", FloatType(), True),
            StructField("5_double_t", DoubleType(), True),
            StructField("6_decimal_t", DecimalType(38, 18), True),
            StructField("7_date_t", DateType(), True),
            StructField("8_timestamp_t", TimestampType(), True),
            StructField("9_binary_t", BinaryType(), True)])
        cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
                     date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1), bytearray(b"a")),
                    (u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
                     date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2), bytearray(b"bb")),
                    (u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
                     date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3), bytearray(b"ccc")),
                    (u"d", 4, 40, 1.0, 8.0, Decimal("8.0"),
                     date(2262, 4, 12), datetime(2262, 3, 3, 3, 3, 3), bytearray(b"dddd"))]

    @classmethod
    def tearDownClass(cls):
        # Restore the process timezone saved in setUpClass.
        del os.environ["TZ"]
        if cls.tz_prev is not None:
            os.environ["TZ"] = cls.tz_prev
        time.tzset()
        super(ArrowTests, cls).tearDownClass()

    def create_pandas_data_frame(self):
        """Build the pandas equivalent of ``cls.data`` with matching dtypes."""
        import numpy as np
        data_dict = {}
        for j, name in enumerate(self.schema.names):
            data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
        # need to convert these to numpy types first
        data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
        data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
        return pd.DataFrame(data=data_dict)

    def test_toPandas_fallback_enabled(self):
        # MapType is unsupported by Arrow here; with fallback on, toPandas
        # must warn and take the non-Arrow path instead of failing.
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
            schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
            df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
            with QuietTest(self.sc):
                with self.warnings_lock:
                    with warnings.catch_warnings(record=True) as warns:
                        # we want the warnings to appear even if this test is run from a subclass
                        warnings.simplefilter("always")
                        pdf = df.toPandas()
                        # Catch and check the last UserWarning.
                        user_warns = [
                            warn.message for warn in warns if isinstance(warn.message, UserWarning)]
                        self.assertTrue(len(user_warns) > 0)
                        self.assertTrue(
                            "Attempting non-optimization" in _exception_message(user_warns[-1]))
                        assert_frame_equal(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))

    def test_toPandas_fallback_disabled(self):
        # With fallback off, the unsupported type must raise.
        schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
        df = self.spark.createDataFrame([(None,)], schema=schema)
        with QuietTest(self.sc):
            with self.warnings_lock:
                with self.assertRaisesRegexp(Exception, 'Unsupported type'):
                    df.toPandas()

    def test_null_conversion(self):
        # An all-None row must surface as exactly one null per column.
        df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
                                             self.data)
        pdf = df_null.toPandas()
        null_counts = pdf.isnull().sum().tolist()
        self.assertTrue(all([c == 1 for c in null_counts]))

    def _toPandas_arrow_toggle(self, df):
        """Return (non-Arrow toPandas result, Arrow toPandas result)."""
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            pdf = df.toPandas()
        pdf_arrow = df.toPandas()
        return pdf, pdf_arrow

    def test_toPandas_arrow_toggle(self):
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
        expected = self.create_pandas_data_frame()
        assert_frame_equal(expected, pdf)
        assert_frame_equal(expected, pdf_arrow)

    def test_toPandas_respect_session_timezone(self):
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        timezone = "America/New_York"
        # When respectSessionTimeZone is off, both paths agree as-is.
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": False,
                "spark.sql.session.timeZone": timezone}):
            pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
            assert_frame_equal(pdf_arrow_la, pdf_la)
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": True,
                "spark.sql.session.timeZone": timezone}):
            pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
            assert_frame_equal(pdf_arrow_ny, pdf_ny)
            self.assertFalse(pdf_ny.equals(pdf_la))
            # Shifting the LA timestamps into the session timezone must
            # reproduce the NY result.
            from pyspark.sql.types import _check_series_convert_timestamps_local_tz
            pdf_la_corrected = pdf_la.copy()
            for field in self.schema:
                if isinstance(field.dataType, TimestampType):
                    pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
                        pdf_la_corrected[field.name], timezone)
            assert_frame_equal(pdf_ny, pdf_la_corrected)

    def test_pandas_round_trip(self):
        pdf = self.create_pandas_data_frame()
        df = self.spark.createDataFrame(self.data, schema=self.schema)
        pdf_arrow = df.toPandas()
        assert_frame_equal(pdf_arrow, pdf)

    def test_filtered_frame(self):
        # An empty (fully filtered) result must keep its column structure.
        df = self.spark.range(3).toDF("i")
        pdf = df.filter("i < 0").toPandas()
        self.assertEqual(len(pdf.columns), 1)
        self.assertEqual(pdf.columns[0], "i")
        self.assertTrue(pdf.empty)

    def test_no_partition_frame(self):
        # A DataFrame backed by an empty RDD must also keep its schema.
        schema = StructType([StructField("field1", StringType(), True)])
        df = self.spark.createDataFrame(self.sc.emptyRDD(), schema)
        pdf = df.toPandas()
        self.assertEqual(len(pdf.columns), 1)
        self.assertEqual(pdf.columns[0], "field1")
        self.assertTrue(pdf.empty)

    def test_propagates_spark_exception(self):
        # Executor-side failures must surface through toPandas.
        df = self.spark.range(3).toDF("i")

        def raise_exception():
            raise Exception("My error")
        exception_udf = udf(raise_exception, IntegerType())
        df = df.withColumn("error", exception_udf())
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(Exception, 'My error'):
                df.toPandas()

    def _createDataFrame_toggle(self, pdf, schema=None):
        """Return (non-Arrow createDataFrame result, Arrow result)."""
        with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
            df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
        df_arrow = self.spark.createDataFrame(pdf, schema=schema)
        return df_no_arrow, df_arrow

    def test_createDataFrame_toggle(self):
        pdf = self.create_pandas_data_frame()
        df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
        self.assertEquals(df_no_arrow.collect(), df_arrow.collect())

    def test_createDataFrame_respect_session_timezone(self):
        from datetime import timedelta
        pdf = self.create_pandas_data_frame()
        timezone = "America/New_York"
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": False,
                "spark.sql.session.timeZone": timezone}):
            df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
            result_la = df_no_arrow_la.collect()
            result_arrow_la = df_arrow_la.collect()
            self.assertEqual(result_la, result_arrow_la)
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": True,
                "spark.sql.session.timeZone": timezone}):
            df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
            result_ny = df_no_arrow_ny.collect()
            result_arrow_ny = df_arrow_ny.collect()
            self.assertEqual(result_ny, result_arrow_ny)
            self.assertNotEqual(result_ny, result_la)
            # Correct result_la by adjusting 3 hours difference between Los Angeles and New York
            result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
                                          for k, v in row.asDict().items()})
                                   for row in result_la]
            self.assertEqual(result_ny, result_la_corrected)

    def test_createDataFrame_with_schema(self):
        pdf = self.create_pandas_data_frame()
        df = self.spark.createDataFrame(pdf, schema=self.schema)
        self.assertEquals(self.schema, df.schema)
        pdf_arrow = df.toPandas()
        assert_frame_equal(pdf_arrow, pdf)

    def test_createDataFrame_with_incorrect_schema(self):
        pdf = self.create_pandas_data_frame()
        fields = list(self.schema)
        fields[0], fields[1] = fields[1], fields[0]  # swap str with int
        wrong_schema = StructType(fields)
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(Exception, "integer.*required"):
                self.spark.createDataFrame(pdf, schema=wrong_schema)

    def test_createDataFrame_with_names(self):
        pdf = self.create_pandas_data_frame()
        new_names = list(map(str, range(len(self.schema.fieldNames()))))
        # Test that schema as a list of column names gets applied
        df = self.spark.createDataFrame(pdf, schema=list(new_names))
        self.assertEquals(df.schema.fieldNames(), new_names)
        # Test that schema as tuple of column names gets applied
        df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
        self.assertEquals(df.schema.fieldNames(), new_names)

    def test_createDataFrame_column_name_encoding(self):
        pdf = pd.DataFrame({u'a': [1]})
        columns = self.spark.createDataFrame(pdf).columns
        self.assertTrue(isinstance(columns[0], str))
        self.assertEquals(columns[0], 'a')
        columns = self.spark.createDataFrame(pdf, [u'b']).columns
        self.assertTrue(isinstance(columns[0], str))
        self.assertEquals(columns[0], 'b')

    def test_createDataFrame_with_single_data_type(self):
        # A scalar (non-struct) schema string is not supported for pandas input.
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
                self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")

    def test_createDataFrame_does_not_modify_input(self):
        # Some series get converted for Spark to consume, this makes sure input is unchanged
        pdf = self.create_pandas_data_frame()
        # Use a nanosecond value to make sure it is not truncated
        # NOTE(review): DataFrame.ix is deprecated (removed in pandas 1.0) —
        # .loc/.iloc would be needed on newer pandas; confirm supported range.
        pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be casted
        pdf.ix[1, '2_int_t'] = None
        pdf_copy = pdf.copy(deep=True)
        self.spark.createDataFrame(pdf, schema=self.schema)
        self.assertTrue(pdf.equals(pdf_copy))

    def test_schema_conversion_roundtrip(self):
        from pyspark.sql.types import from_arrow_schema, to_arrow_schema
        arrow_schema = to_arrow_schema(self.schema)
        schema_rt = from_arrow_schema(arrow_schema)
        self.assertEquals(self.schema, schema_rt)

    def test_createDataFrame_with_array_type(self):
        pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
        df, df_arrow = self._createDataFrame_toggle(pdf)
        result = df.collect()
        result_arrow = df_arrow.collect()
        expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
        for r in range(len(expected)):
            for e in range(len(expected[r])):
                self.assertTrue(expected[r][e] == result_arrow[r][e] and
                                result[r][e] == result_arrow[r][e])

    def test_toPandas_with_array_type(self):
        expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
        array_schema = StructType([StructField("a", ArrayType(IntegerType())),
                                   StructField("b", ArrayType(StringType()))])
        df = self.spark.createDataFrame(expected, schema=array_schema)
        pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
        result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
        result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
        for r in range(len(expected)):
            for e in range(len(expected[r])):
                self.assertTrue(expected[r][e] == result_arrow[r][e] and
                                result[r][e] == result_arrow[r][e])

    def test_createDataFrame_with_int_col_names(self):
        import numpy as np
        pdf = pd.DataFrame(np.random.rand(4, 2))
        df, df_arrow = self._createDataFrame_toggle(pdf)
        pdf_col_names = [str(c) for c in pdf.columns]
        self.assertEqual(pdf_col_names, df.columns)
        self.assertEqual(pdf_col_names, df_arrow.columns)

    def test_createDataFrame_fallback_enabled(self):
        # Mirror of test_toPandas_fallback_enabled for the ingestion path.
        with QuietTest(self.sc):
            with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
                with warnings.catch_warnings(record=True) as warns:
                    # we want the warnings to appear even if this test is run from a subclass
                    warnings.simplefilter("always")
                    df = self.spark.createDataFrame(
                        pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
                    # Catch and check the last UserWarning.
                    user_warns = [
                        warn.message for warn in warns if isinstance(warn.message, UserWarning)]
                    self.assertTrue(len(user_warns) > 0)
                    self.assertTrue(
                        "Attempting non-optimization" in _exception_message(user_warns[-1]))
                    self.assertEqual(df.collect(), [Row(a={u'a': 1})])

    def test_createDataFrame_fallback_disabled(self):
        with QuietTest(self.sc):
            with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
                self.spark.createDataFrame(
                    pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")

    # Regression test for SPARK-23314
    def test_timestamp_dst(self):
        # Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
        dt = [datetime.datetime(2015, 11, 1, 0, 30),
              datetime.datetime(2015, 11, 1, 1, 30),
              datetime.datetime(2015, 11, 1, 2, 30)]
        pdf = pd.DataFrame({'time': dt})
        df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
        df_from_pandas = self.spark.createDataFrame(pdf)
        assert_frame_equal(pdf, df_from_python.toPandas())
        assert_frame_equal(pdf, df_from_pandas.toPandas())

    # Regression test for SPARK-28003
    def test_timestamp_nat(self):
        dt = [pd.NaT, pd.Timestamp('2019-06-11'), None] * 100
        pdf = pd.DataFrame({'time': dt})
        df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf)
        assert_frame_equal(pdf, df_no_arrow.toPandas())
        assert_frame_equal(pdf, df_arrow.toPandas())

    def test_toPandas_batch_order(self):

        def delay_first_part(partition_index, iterator):
            if partition_index == 0:
                time.sleep(0.1)
            return iterator

        # Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
        def run_test(num_records, num_parts, max_records, use_delay=False):
            df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
            if use_delay:
                df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
            with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
                pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
                assert_frame_equal(pdf, pdf_arrow)

        cases = [
            (1024, 512, 2),  # Use large num partitions for more likely collecting out of order
            (64, 8, 2, True),  # Use delay in first partition to force collecting out of order
            (64, 64, 1),  # Test single batch per partition
            (64, 1, 64),  # Test single partition, single batch
            (64, 1, 8),  # Test single partition, multiple batches
            (30, 7, 2),  # Test different sized partitions
        ]
        for case in cases:
            run_test(*case)
@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)
class MaxResultArrowTests(unittest.TestCase):
    # Kept separate from ArrowTests: 'spark.driver.maxResultSize' is a
    # static configuration bound to the Spark context at creation time.

    @classmethod
    def setUpClass(cls):
        conf = SparkConf().set("spark.driver.maxResultSize", "10k")
        cls.spark = SparkSession(SparkContext('local[4]', cls.__name__, conf=conf))
        # Explicitly enable Arrow and disable fallback.
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
        cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")

    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, "spark"):
            cls.spark.stop()

    def test_exception_by_max_results(self):
        # Collecting 10k rows must exceed the 10k-byte maxResultSize cap.
        with self.assertRaisesRegexp(Exception, "is bigger than"):
            self.spark.range(0, 10000, 1, 100).toPandas()
class EncryptionArrowTests(ArrowTests):
    """Re-run the full ArrowTests suite with Spark I/O encryption enabled."""

    @classmethod
    def conf(cls):
        base_conf = super(EncryptionArrowTests, cls).conf()
        return base_conf.set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
    from pyspark.sql.tests.test_arrow import *

    # Prefer the XML test runner for CI report generation when available.
    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
vortex-exoplanet/VIP | vip_hci/negfc/speckle_noise.py | 2 | 12649 | #! /usr/bin/env python
"""
Module with routines allowing for the estimation of the uncertainty on the
parameters of an imaged companion associated to residual speckle noise.
"""
__author__ = 'O. Wertz, C. A. Gomez Gonzalez, V. Christiaens'
__all__ = ['speckle_noise_uncertainty']
#import itertools as itt
from multiprocessing import cpu_count
import numpy as np
import matplotlib.pyplot as plt
from ..conf.utils_conf import pool_map, iterable #eval_func_tuple
from ..metrics import cube_inject_companions
from .simplex_optim import firstguess_simplex
from .simplex_fmerit import get_mu_and_sigma
from .utils_negfc import cube_planet_free
from .mcmc_sampling import confidence
def speckle_noise_uncertainty(cube, p_true, angle_range, derot_angles, algo,
                              psfn, plsc, fwhm, aperture_radius, cube_ref=None,
                              fmerit='sum', algo_options=None, transmission=None,
                              mu_sigma=None, wedge=None, weights=None,
                              force_rPA=False, nproc=None, simplex_options=None,
                              bins=None, save=False, output=None, verbose=True,
                              full_output=True, plot=False):
    """
    Estimate the speckle-noise uncertainty on the parameters of a companion.

    For each angle in ``angle_range``: (1) inject a fake companion in the
    planet-free cube at the true radius/flux, (2) retrieve its parameters
    with a negative-fake-companion simplex minimization, and (3) record the
    offsets between injected and retrieved values. The distribution of
    offsets is then fit with a 1d Gaussian to infer 1-sigma uncertainties.

    Parameters
    ----------
    cube : numpy ndarray
        The original ADI cube.
    p_true : tuple of 3 elements
        (radial separation, PA from x=0 axis, flux) of the companion
        candidate. The planet is first subtracted, then used for injections.
    angle_range : 1d numpy array
        Angles (deg, from x=0 axis, counter-clockwise, in [0, 360[) at which
        fake companions are injected.
    derot_angles : 1d numpy array
        Derotation angles for ADI; length must match the cube.
    algo : callable
        PSF modeling/subtraction routine returning a post-processed frame.
    psfn : 2d numpy array
        Centered, flux-normalized PSF template.
    plsc : float
        Plate scale in arcsec/px (only used in verbose output).
    fwhm : float
        FWHM in pixels.
    aperture_radius : float
        Aperture radius (in fwhm) used by the merit function.
    cube_ref : numpy ndarray, optional
        Reference cube for RDI.
    fmerit : str, optional
        Figure of merit used when ``mu_sigma`` is None.
    algo_options : dict, optional
        Options for ``algo`` (ncomp, svd_mode, collapse, imlib,
        interpolation, scaling, delta_rot, annulus_width, ...).
    transmission : numpy array, optional
        2-column array: radial separation [px] and off-axis transmission.
    mu_sigma : tuple of 2 floats, bool or None, optional
        None: use ``fmerit``. Tuple: (mean, stddev) of annulus intensities.
        Anything else truthy: compute them automatically.
    wedge : tuple, optional
        Wedge passed to ``get_mu_and_sigma``.
    weights : 1d array, optional
        Frame weights; length must match cube axis 0.
    force_rPA : bool, optional
        Only optimize the flux, keeping (r, PA) fixed.
    nproc : int or None, optional
        Number of processes; defaults to half the CPU count.
    simplex_options : dict, optional
        Simplex parameters, e.g. {'tol': 1e-08, 'max_iter': 200}.
    bins : int or None, optional
        Histogram bins for the offset distributions; defaults to the number
        of injected companions.
    save : bool, optional
        Pickle the results to ``output``.
    output : str, optional
        Output filename used when ``save`` is True.
    verbose, full_output, plot : bool, optional
        Verbosity / extra returns / show the Gaussian-fit plots.

    Returns
    -------
    sp_unc : numpy ndarray
        1-sigma speckle-noise uncertainties on radius, PA and flux
        (flux only if ``force_rPA``).
    mean_dev, p_simplex, offset, chi2, nit, success
        Only if ``full_output``: mean deviations, retrieved parameters,
        offsets, and per-injection simplex diagnostics.
    """
    # Avoid a mutable default argument (was ``algo_options={}``).
    if algo_options is None:
        algo_options = {}

    if not nproc:   # Hyper-threading "duplicates" the cores -> cpu_count/2
        # Fix: use floor division; ``cpu_count()/2`` is a float on Python 3
        # and would be rejected as a process count.
        nproc = cpu_count() // 2

    if verbose:
        print('')
        print('#######################################################')
        print('### SPECKLE NOISE DETERMINATION ###')
        print('#######################################################')
        print('')

    r_true, theta_true, f_true = p_true

    # Drop a duplicated end point (e.g. 0 and 360 deg are the same angle).
    if angle_range[0] % 360 == angle_range[-1] % 360:
        angle_range = angle_range[:-1]

    if verbose:
        print('Number of steps: {}'.format(angle_range.shape[0]))
        print('')

    imlib = algo_options.get('imlib', 'opencv')
    interpolation = algo_options.get('interpolation', 'lanczos4')

    # FIRST SUBTRACT THE TRUE COMPANION CANDIDATE
    planet_parameter = np.array([[r_true, theta_true, f_true]])
    cube_pf = cube_planet_free(planet_parameter, cube, derot_angles, psfn, plsc,
                               imlib=imlib, interpolation=interpolation,
                               transmission=transmission)

    # Measure mu and sigma once in the annulus (instead of each MCMC step)
    if isinstance(mu_sigma, tuple):
        if len(mu_sigma) != 2:
            raise TypeError("If a tuple, mu_sigma must have 2 elements")
    elif mu_sigma is not None:
        ncomp = algo_options.get('ncomp', None)
        annulus_width = algo_options.get('annulus_width', int(fwhm))
        if weights is not None:
            if not len(weights) == cube.shape[0]:
                raise TypeError("Weights should have same length as cube axis 0")
            norm_weights = weights / np.sum(weights)
        else:
            norm_weights = weights
        mu_sigma = get_mu_and_sigma(cube, derot_angles, ncomp, annulus_width,
                                    aperture_radius, fwhm, r_true, theta_true,
                                    cube_ref=cube_ref, wedge=wedge, algo=algo,
                                    weights=norm_weights,
                                    algo_options=algo_options)

    # Inject and retrieve one fake companion per angle, in parallel.
    res = pool_map(nproc, _estimate_speckle_one_angle, iterable(angle_range),
                   cube_pf, psfn, derot_angles, r_true, f_true, plsc, fwhm,
                   aperture_radius, cube_ref, fmerit, algo, algo_options,
                   transmission, mu_sigma, weights, force_rPA, simplex_options,
                   verbose=verbose)
    residuals = np.array(res)

    if verbose:
        print("residuals (offsets): ", residuals[:, 3], residuals[:, 4],
              residuals[:, 5])

    p_simplex = np.transpose(np.vstack((residuals[:, 0], residuals[:, 1],
                                        residuals[:, 2])))
    offset = np.transpose(np.vstack((residuals[:, 3], residuals[:, 4],
                                     residuals[:, 5])))
    print(offset)  # NOTE(review): debug print kept for output compatibility
    chi2 = residuals[:, 6]
    nit = residuals[:, 7]
    success = residuals[:, 8]

    if save:
        speckles = {'r_true': r_true,
                    'angle_range': angle_range,
                    'f_true': f_true,
                    'r_simplex': residuals[:, 0],
                    'theta_simplex': residuals[:, 1],
                    'f_simplex': residuals[:, 2],
                    'offset': offset,
                    'chi2': chi2,
                    'nit': nit,
                    'success': success}
        if output is None:
            output = 'speckles_noise_result'
        from pickle import Pickler
        with open(output, 'wb') as fileSave:
            myPickler = Pickler(fileSave)
            myPickler.dump(speckles)

    # Calculate 1 sigma of distribution of deviations
    print(offset.shape)  # NOTE(review): debug print kept for compatibility
    if force_rPA:
        # Only the flux offset is meaningful when (r, PA) were fixed.
        offset = offset[:, 2]
        print(offset.shape)
    if bins is None:
        bins = offset.shape[0]
    mean_dev, sp_unc = confidence(offset, cfd=68.27, bins=bins,
                                  gaussian_fit=True, verbose=True, save=False,
                                  output_dir='', force=True)
    if plot:
        plt.show()

    if full_output:
        return sp_unc, mean_dev, p_simplex, offset, chi2, nit, success
    else:
        return sp_unc
def _estimate_speckle_one_angle(angle, cube_pf, psfn, angs, r_true, f_true,
                                plsc, fwhm, aperture_radius, cube_ref, fmerit,
                                algo, algo_options, transmission, mu_sigma,
                                weights, force_rPA, simplex_options,
                                verbose=True):
    """Inject one fake companion at `angle`, retrieve it by simplex NEGFC,
    and return the retrieved parameters, their offsets from the injected
    values, and the simplex diagnostics."""
    if verbose:
        print('Process is running for angle: {:.2f}'.format(angle))

    cube_fc = cube_inject_companions(cube_pf, psfn, angs, flevel=f_true,
                                     plsc=plsc, rad_dists=[r_true],
                                     n_branches=1, theta=angle,
                                     transmission=transmission, verbose=False)

    ncomp = algo_options.get('ncomp', None)
    annulus_width = algo_options.get('annulus_width', int(fwhm))

    res_simplex = firstguess_simplex((r_true, angle, f_true), cube_fc, angs,
                                     psfn, plsc, ncomp, fwhm, annulus_width,
                                     aperture_radius, cube_ref=cube_ref,
                                     fmerit=fmerit, algo=algo,
                                     algo_options=algo_options,
                                     transmission=transmission,
                                     mu_sigma=mu_sigma, weights=weights,
                                     force_rPA=force_rPA,
                                     options=simplex_options,
                                     verbose=False)

    if force_rPA:
        # Only the flux was free; (r, PA) stay at the injected values.
        retrieved_f, = res_simplex.x
        retrieved_r, retrieved_PA = r_true, angle
    else:
        retrieved_r, retrieved_PA, retrieved_f = res_simplex.x

    return (retrieved_r, retrieved_PA, retrieved_f,
            retrieved_r - r_true, retrieved_PA - angle, retrieved_f - f_true,
            res_simplex.fun, res_simplex.nit, res_simplex.success)
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_pdf.py | 2 | 98412 | # -*- coding: utf-8 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <jks@iki.fi>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import codecs
import os
import re
import struct
import sys
import time
import warnings
import zlib
import collections
from io import BytesIO
from functools import total_ordering
import numpy as np
from six import unichr
from datetime import datetime, tzinfo, timedelta
from math import ceil, cos, floor, pi, sin
import matplotlib
from matplotlib import __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import (Bunch, get_realpath_and_stat,
is_writable_file_like, maxdict)
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font, get_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import (FIXED_WIDTH, ITALIC, LOAD_NO_SCALE,
LOAD_NO_HINTING, KERNING_UNFITTED)
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib.dates import UTC
from matplotlib import _path
from matplotlib import _png
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
def fill(strings, linelen=75):
    """Join *strings* with single spaces, breaking them into lines of at
    most *linelen* characters where possible, and return one bytes object
    with newline-separated lines."""
    line_len_so_far = 0
    line_start = 0
    lines = []
    for idx, chunk in enumerate(strings):
        chunk_len = len(chunk)
        if line_len_so_far + chunk_len < linelen:
            # Still fits: account for the chunk plus its separating space.
            line_len_so_far += chunk_len + 1
        else:
            # Flush everything accumulated so far and start a new line.
            lines.append(b' '.join(strings[line_start:idx]))
            line_start = idx
            line_len_so_far = chunk_len
    lines.append(b' '.join(strings[line_start:]))
    return b'\n'.join(lines)
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()':
return b'\\' + m
elif m == b'\n':
return br'\n'
elif m == b'\r':
return br'\r'
assert False
def pdfRepr(obj):
    """Map Python objects to PDF syntax (as bytes)."""
    # Objects that know how to render themselves take precedence.
    if hasattr(obj, 'pdfRepr'):
        return obj.pdfRepr()

    # Floats.  PDF does not have exponential notation (1.0e-10) so we
    # need to use %f with some precision.  Perhaps the precision
    # should adapt to the magnitude of the number?
    elif isinstance(obj, (float, np.floating)):
        if not np.isfinite(obj):
            raise ValueError("Can only output finite numbers in PDF")
        fixed = ("%.10f" % obj).encode('ascii')
        return fixed.rstrip(b'0').rstrip(b'.')

    # Booleans.  Needs to be tested before integers since
    # isinstance(True, int) is true.
    elif isinstance(obj, bool):
        return [b'false', b'true'][obj]

    # Integers are written as such.
    elif isinstance(obj, (six.integer_types, np.integer)):
        return ("%d" % obj).encode('ascii')

    # Unicode strings: plain ASCII if possible, else UTF-16BE with BOM.
    elif isinstance(obj, six.text_type):
        try:
            encoded = obj.encode('ASCII')
        except UnicodeEncodeError:
            encoded = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
        return pdfRepr(encoded)

    # Byte strings are written in parentheses with escapes.  Balanced
    # parens would be allowed unescaped, but escaping all is simpler.
    # TODO: cut long strings into lines; PDF has a maximum line length.
    elif isinstance(obj, bytes):
        return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'

    # Dictionaries.  Keys must be PDF names, so strings found there are
    # wrapped in Name; values must already be PDF-representable.
    elif isinstance(obj, dict):
        parts = [b"<<"]
        parts.extend(Name(key).pdfRepr() + b" " + pdfRepr(obj[key])
                     for key in sorted(obj))
        parts.append(b">>")
        return fill(parts)

    # Lists and tuples.
    elif isinstance(obj, (list, tuple)):
        parts = [b"["]
        parts.extend(pdfRepr(val) for val in obj)
        parts.append(b"]")
        return fill(parts)

    # The null keyword.
    elif obj is None:
        return b'null'

    # A date, rendered as D:YYYYMMDDHHMMSS plus a timezone suffix.
    elif isinstance(obj, datetime):
        stamp = obj.strftime('D:%Y%m%d%H%M%S')
        z = obj.utcoffset()
        if z is not None:
            z = z.seconds
        else:
            # Naive datetime: fall back to the local timezone offset.
            z = time.altzone if time.daylight else time.timezone
        if z == 0:
            stamp += 'Z'
        elif z < 0:
            stamp += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
        else:
            stamp += "-%02d'%02d'" % (z // 3600, z % 3600)
        return pdfRepr(stamp)

    # A bounding box.
    elif isinstance(obj, BboxBase):
        return fill([pdfRepr(val) for val in obj.bounds])

    else:
        msg = "Don't know a PDF representation for %s objects." % type(obj)
        raise TypeError(msg)
class Reference(object):
    """PDF indirect-object reference.

    Use PdfFile.reserveObject() to create References.
    """

    def __init__(self, id):
        self.id = id

    def __repr__(self):
        return "<Reference %d>" % self.id

    def pdfRepr(self):
        # An indirect reference is "<id> <generation> R"; generation is 0.
        return ("%d 0 R" % self.id).encode('ascii')

    def write(self, contents, file):
        out = file.write
        out(("%d 0 obj\n" % self.id).encode('ascii'))
        out(pdfRepr(contents))
        out(b"\nendobj\n")
@total_ordering
class Name(object):
    """PDF name object; bytes outside the printable ASCII range !-~ are
    hex-escaped as #xx."""
    __slots__ = ('name',)
    _regex = re.compile(r'[^!-~]')

    def __init__(self, name):
        if isinstance(name, Name):
            # Copy constructor: share the already-sanitized bytes.
            self.name = name.name
            return
        if isinstance(name, bytes):
            name = name.decode('ascii')
        self.name = self._regex.sub(Name.hexify, name).encode('ascii')

    def __repr__(self):
        return "<Name %s>" % self.name

    def __str__(self):
        return '/' + six.text_type(self.name)

    def __eq__(self, other):
        return isinstance(other, Name) and self.name == other.name

    def __lt__(self, other):
        return isinstance(other, Name) and self.name < other.name

    def __hash__(self):
        return hash(self.name)

    @staticmethod
    def hexify(match):
        return '#%02x' % ord(match.group())

    def pdfRepr(self):
        return b'/' + self.name
class Operator(object):
    """PDF operator object: wraps a raw operator token such as b'Tf'."""
    __slots__ = ('op',)

    def __init__(self, op):
        self.op = op

    def __repr__(self):
        return '<Operator %s>' % self.op

    def pdfRepr(self):
        # Operators are emitted verbatim.
        return self.op
class Verbatim(object):
    """Store verbatim PDF command content for later inclusion in the
    stream."""

    def __init__(self, x):
        self._x = x

    def pdfRepr(self):
        # Emitted exactly as given, with no escaping or conversion.
        return self._x
# PDF operators (not an exhaustive list)
_pdfops = dict(
close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
# Op is a namespace of Operator instances, one per entry above (e.g. Op.fill).
Op = Bunch(**{name: Operator(value) for name, value in six.iteritems(_pdfops)})
def _paint_path(fill, stroke):
    """Return the PDF operator to paint a path in the following way:
    fill: fill the path with the fill color
    stroke: stroke the outline of the path with the line color"""
    if fill and stroke:
        return Op.fill_stroke
    if stroke:
        return Op.stroke
    if fill:
        return Op.fill
    return Op.endpath
Op.paint_path = _paint_path
class Stream(object):
    """PDF stream object.

    This has no pdfRepr method.  Instead, call begin(), then output the
    contents of the stream by calling write(), and finally call end().
    """
    __slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')

    def __init__(self, id, len, file, extra=None, png=None):
        """id: object id of stream; len: an unused Reference object for the
        length of the stream, or None (to use a memory buffer); file:
        a PdfFile; extra: a dictionary of extra key-value pairs to
        include in the stream header; png: if the data is already
        png compressed, the decode parameters"""
        self.id = id             # object id
        self.len = len           # id of length object
        self.pdfFile = file
        self.file = file.fh      # file to which the stream is written
        self.compressobj = None  # compression object
        self.extra = dict() if extra is None else extra.copy()
        if png is not None:
            self.extra.update({'Filter': Name('FlateDecode'),
                               'DecodeParms': png})

        self.pdfFile.recordXref(self.id)
        # Already-png data is pre-compressed; otherwise honor the rcParam.
        if rcParams['pdf.compression'] and not png:
            self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
        if self.len is None:
            # Buffer in memory so the length can be written inline.
            self.file = BytesIO()
        else:
            self._writeHeader()
            self.pos = self.file.tell()

    def _writeHeader(self):
        write = self.file.write
        write(("%d 0 obj\n" % self.id).encode('ascii'))
        header = self.extra
        header['Length'] = self.len
        if rcParams['pdf.compression']:
            header['Filter'] = Name('FlateDecode')

        write(pdfRepr(header))
        write(b"\nstream\n")

    def end(self):
        """Finalize stream."""
        self._flush()
        if self.len is None:
            # The stream was buffered: now that the length is known, emit
            # the header followed by the buffered contents.
            contents = self.file.getvalue()
            self.len = len(contents)
            self.file = self.pdfFile.fh
            self._writeHeader()
            self.file.write(contents)
            self.file.write(b"\nendstream\nendobj\n")
        else:
            length = self.file.tell() - self.pos
            self.file.write(b"\nendstream\nendobj\n")
            self.pdfFile.writeObject(self.len, length)

    def write(self, data):
        """Write some data on the stream."""
        if self.compressobj is None:
            self.file.write(data)
        else:
            self.file.write(self.compressobj.compress(data))

    def _flush(self):
        """Flush the compression object."""
        if self.compressobj is not None:
            self.file.write(self.compressobj.flush())
            self.compressobj = None
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename, metadata=None):
    """Open *filename* (a path or writable file-like object) and write the
    PDF header, catalog and shared resource dictionary."""
    self.nextObject = 1  # next free object id
    self.xrefTable = [[0, 65535, 'the zero object']]
    self.passed_in_file_object = False
    self.original_file_like = None
    self.tell_base = 0
    if isinstance(filename, six.string_types):
        fh = open(filename, 'wb')
    elif is_writable_file_like(filename):
        try:
            self.tell_base = filename.tell()
        except IOError:
            # Non-seekable target: buffer in memory and copy at close time.
            fh = BytesIO()
            self.original_file_like = filename
        else:
            fh = filename
            self.passed_in_file_object = True
    else:
        raise ValueError("filename must be a path or a file-like object")

    self._core14fontdir = os.path.join(
        rcParams['datapath'], 'fonts', 'pdfcorefonts')
    self.fh = fh
    self.currentstream = None  # stream object to write to, if any
    fh.write(b"%PDF-1.4\n")    # 1.4 is the first version to have alpha
    # Output some eight-bit chars as a comment so various utilities
    # recognize the file as binary by looking at the first few
    # lines (see note in section 3.4.1 of the PDF reference).
    fh.write(b"%\254\334 \253\272\n")

    self.rootObject = self.reserveObject('root')
    self.pagesObject = self.reserveObject('pages')
    self.pageList = []
    self.fontObject = self.reserveObject('fonts')
    self.alphaStateObject = self.reserveObject('extended graphics states')
    self.hatchObject = self.reserveObject('tiling patterns')
    self.gouraudObject = self.reserveObject('Gouraud triangles')
    self.XObjectObject = self.reserveObject('external objects')
    self.resourceObject = self.reserveObject('resources')

    self.writeObject(self.rootObject,
                     {'Type': Name('Catalog'), 'Pages': self.pagesObject})

    # get source date from SOURCE_DATE_EPOCH, if set
    # See https://reproducible-builds.org/specs/source-date-epoch/
    source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
    if source_date_epoch:
        source_date = datetime.utcfromtimestamp(int(source_date_epoch))
        source_date = source_date.replace(tzinfo=UTC)
    else:
        source_date = datetime.today()

    self.infoDict = {
        'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
        'Producer': 'matplotlib pdf backend %s' % __version__,
        'CreationDate': source_date
    }
    if metadata is not None:
        self.infoDict.update(metadata)
    # Drop keys explicitly set to None so they are omitted from the PDF.
    self.infoDict = {k: v for (k, v) in self.infoDict.items()
                     if v is not None}

    self.fontNames = {}      # maps filenames to internal font names
    self.nextFont = 1        # next free internal font name
    self.dviFontInfo = {}    # maps dvi font names to embedding information
    self._texFontMap = None  # maps TeX font names to PostScript fonts
    # differently encoded Type-1 fonts may share the same descriptor
    self.type1Descriptors = {}
    self.used_characters = {}
    self.alphaStates = {}    # maps alpha values to graphics state objects
    self.nextAlphaState = 1
    # reproducible writeHatches needs an ordered dict:
    self.hatchPatterns = collections.OrderedDict()
    self.nextHatch = 1
    self.gouraudTriangles = []
    self._images = collections.OrderedDict()  # reproducible writeImages
    self.nextImage = 1
    self.markers = collections.OrderedDict()  # reproducible writeMarkers
    self.multi_byte_charprocs = {}
    self.paths = []
    self.pageAnnotations = []  # annotations for the current page

    # The PDF spec recommends to include every procset
    procsets = [Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]

    # Write resource dictionary.
    # Possibly TODO: more general ExtGState (graphics state dictionaries)
    # ColorSpace Pattern Shading Properties
    resources = {'Font': self.fontObject,
                 'XObject': self.XObjectObject,
                 'ExtGState': self.alphaStateObject,
                 'Pattern': self.hatchObject,
                 'Shading': self.gouraudObject,
                 'ProcSet': procsets}
    self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
    """Finish any open stream and start a new page of *width* x *height*
    inches (MediaBox is in 1/72-inch points)."""
    self.endStream()

    self.width, self.height = width, height
    contentObject = self.reserveObject('page contents')
    page_dict = {'Type': Name('Page'),
                 'Parent': self.pagesObject,
                 'Resources': self.resourceObject,
                 'MediaBox': [0, 0, 72 * width, 72 * height],
                 'Contents': contentObject,
                 'Group': {'Type': Name('Group'),
                           'S': Name('Transparency'),
                           'CS': Name('DeviceRGB')},
                 'Annots': self.pageAnnotations,
                 }
    pageObject = self.reserveObject('page')
    self.writeObject(pageObject, page_dict)
    self.pageList.append(pageObject)

    self.beginStream(contentObject.id,
                     self.reserveObject('length of content stream'))
    # Initialize the pdf graphics state to match the default mpl
    # graphics context: currently only the join style needs to be set
    self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)

    # Clear the list of annotations for the next page
    self.pageAnnotations = []
def newTextnote(self, text, positionRect=None):
    """Create a new text annotation on the current page.

    Parameters
    ----------
    text : str
        The annotation's contents.
    positionRect : list of 4 numbers, optional
        The annotation rectangle [llx, lly, urx, ury]; defaults to an
        off-page position, as before.
    """
    # Fix: avoid a mutable default argument (was positionRect=[-100, -100, 0, 0]).
    if positionRect is None:
        positionRect = [-100, -100, 0, 0]
    theNote = {'Type': Name('Annot'),
               'Subtype': Name('Text'),
               'Contents': text,
               'Rect': positionRect,
               }
    annotObject = self.reserveObject('annotation')
    self.writeObject(annotObject, theNote)
    self.pageAnnotations.append(annotObject)
def finalize(self):
    "Write out the various deferred objects and the pdf end matter."
    self.endStream()
    self.writeFonts()
    self.writeObject(
        self.alphaStateObject,
        {val[0]: val[1] for val in six.itervalues(self.alphaStates)})
    self.writeHatches()
    self.writeGouraudTriangles()

    # Gather every external object (images, markers, charprocs, paths)
    # into one XObject dictionary.
    xobjects = {
        name: ob for image, name, ob in six.itervalues(self._images)}
    for tup in six.itervalues(self.markers):
        xobjects[tup[0]] = tup[1]
    for name, value in six.iteritems(self.multi_byte_charprocs):
        xobjects[name] = value
    for name, path, trans, ob, join, cap, padding, filled, stroked \
            in self.paths:
        xobjects[name] = ob
    self.writeObject(self.XObjectObject, xobjects)

    self.writeImages()
    self.writeMarkers()
    self.writePathCollectionTemplates()
    self.writeObject(self.pagesObject,
                     {'Type': Name('Pages'),
                      'Kids': self.pageList,
                      'Count': len(self.pageList)})
    self.writeInfoDict()

    # Finalize the file
    self.writeXref()
    self.writeTrailer()
def close(self):
    "Flush all buffers and free all resources."
    self.endStream()
    if self.passed_in_file_object:
        # Caller owns the handle: flush but do not close it.
        self.fh.flush()
    else:
        if self.original_file_like is not None:
            # We buffered into memory; copy out to the real target now.
            self.original_file_like.write(self.fh.getvalue())
        self.fh.close()
def write(self, data):
    """Write raw bytes, routed through the current stream if one is open."""
    if self.currentstream is not None:
        self.currentstream.write(data)
    else:
        self.fh.write(data)
def output(self, *data):
    """pdfRepr each argument and write them, space-joined and wrapped by
    fill(), followed by a newline."""
    self.write(fill([pdfRepr(item) for item in data]))
    self.write(b'\n')
def beginStream(self, id, len, extra=None, png=None):
    """Open a new Stream; only one stream may be active at a time."""
    assert self.currentstream is None
    self.currentstream = Stream(id, len, self, extra, png)
def endStream(self):
    """Finalize and detach the current stream, if any (no-op otherwise)."""
    if self.currentstream is None:
        return
    self.currentstream.end()
    self.currentstream = None
def fontName(self, fontprop):
    """
    Select a font based on fontprop and return a name suitable for
    Op.selectfont. If fontprop is a string, it will be interpreted
    as the filename of the font.
    """
    if isinstance(fontprop, six.string_types):
        filename = fontprop
    elif rcParams['pdf.use14corefonts']:
        # Restrict to the 14 PDF core fonts (AFM metrics, no embedding).
        filename = findfont(fontprop, fontext='afm',
                            directory=self._core14fontdir)
        if filename is None:
            filename = findfont("Helvetica", fontext='afm',
                                directory=self._core14fontdir)
    else:
        filename = findfont(fontprop)

    Fx = self.fontNames.get(filename)
    if Fx is None:
        # First use of this font file: assign the next internal name.
        Fx = Name('F%d' % self.nextFont)
        self.fontNames[filename] = Fx
        self.nextFont += 1
        matplotlib.verbose.report(
            'Assigning font %s = %r' % (Fx, filename),
            'debug')

    return Fx
@property
def texFontMap(self):
    # Lazy-load the TeX font map: parsing pdftex.map takes a while and
    # usetex is a relatively rare use case.
    if self._texFontMap is None:
        self._texFontMap = dviread.PsfontsMap(
            dviread.find_tex_file('pdftex.map'))
    return self._texFontMap
def dviFontName(self, dvifont):
    """
    Given a dvi font object, return a name suitable for Op.selectfont.
    This registers the font information in self.dviFontInfo if not yet
    registered.
    """
    dvi_info = self.dviFontInfo.get(dvifont.texname)
    if dvi_info is not None:
        return dvi_info.pdfname
    # Resolve the TeX font name to a PostScript font via pdftex.map.
    psfont = self.texFontMap[dvifont.texname]
    if psfont.filename is None:
        raise ValueError(
            ("No usable font file found for {0} (TeX: {1}). "
             "The font may lack a Type-1 version.")
            .format(psfont.psname, dvifont.texname))
    pdfname = Name('F%d' % self.nextFont)
    self.nextFont += 1
    matplotlib.verbose.report(
        'Assigning font {0} = {1} (dvi)'.format(pdfname, dvifont.texname),
        'debug')
    # Record everything writeFonts() needs to embed this font later.
    self.dviFontInfo[dvifont.texname] = Bunch(
        dvifont=dvifont,
        pdfname=pdfname,
        fontfile=psfont.filename,
        basefont=psfont.psname,
        encodingfile=psfont.encoding,
        effects=psfont.effects)
    return pdfname
def writeFonts(self):
    """Write out every font used in the document (DVI Type-1, AFM,
    and TrueType) and the font resource dictionary referencing them."""
    fonts = {}
    # Fonts referenced from DVI (usetex) output.
    for dviname, info in sorted(self.dviFontInfo.items()):
        Fx = info.pdfname
        matplotlib.verbose.report('Embedding Type-1 font %s from dvi'
                                  % dviname, 'debug')
        fonts[Fx] = self._embedTeXFont(info)
    for filename in sorted(self.fontNames):
        Fx = self.fontNames[filename]
        matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
        if filename.endswith('.afm'):
            # from pdf.use14corefonts
            matplotlib.verbose.report('Writing AFM font', 'debug')
            fonts[Fx] = self._write_afm_font(filename)
        else:
            # a normal TrueType font
            matplotlib.verbose.report('Writing TrueType font', 'debug')
            realpath, stat_key = get_realpath_and_stat(filename)
            chars = self.used_characters.get(stat_key)
            # Only embed fonts from which at least one glyph was used.
            if chars is not None and len(chars[1]):
                fonts[Fx] = self.embedTTF(realpath, chars[1])
    self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
    """Write a font dictionary for one of the AFM core fonts; the font
    program itself is not embedded (no FontFile entry)."""
    with open(filename, 'rb') as fh:
        font = AFM(fh)
    fontname = font.get_fontname()
    fontdict = {'Type': Name('Font'),
                'Subtype': Name('Type1'),
                'BaseFont': Name(fontname),
                'Encoding': Name('WinAnsiEncoding')}
    fontdictObject = self.reserveObject('font dictionary')
    self.writeObject(fontdictObject, fontdict)
    return fontdictObject
def _embedTeXFont(self, fontinfo):
    """Embed the Type-1 font described by *fontinfo* (registered via
    dviFontName) and return a reference to its font dictionary."""
    msg = ('Embedding TeX font {0} - fontinfo={1}'
           .format(fontinfo.dvifont.texname, fontinfo.__dict__))
    matplotlib.verbose.report(msg, 'debug')
    # Widths
    widthsObject = self.reserveObject('font widths')
    self.writeObject(widthsObject, fontinfo.dvifont.widths)
    # Font dictionary
    fontdictObject = self.reserveObject('font dictionary')
    fontdict = {
        'Type': Name('Font'),
        'Subtype': Name('Type1'),
        'FirstChar': 0,
        'LastChar': len(fontinfo.dvifont.widths) - 1,
        'Widths': widthsObject,
        }
    # Encoding (if needed)
    if fontinfo.encodingfile is not None:
        enc = dviread.Encoding(fontinfo.encodingfile)
        differencesArray = [Name(ch) for ch in enc]
        # Leading 0 = the code the first glyph name applies to.
        differencesArray = [0] + differencesArray
        fontdict['Encoding'] = \
            {'Type': Name('Encoding'),
             'Differences': differencesArray}
    # If no file is specified, stop short
    if fontinfo.fontfile is None:
        msg = ('Because of TeX configuration (pdftex.map, see updmap '
               'option pdftexDownloadBase14) the font {0} is not '
               'embedded. This is deprecated as of PDF 1.5 and it may '
               'cause the consumer application to show something that '
               'was not intended.').format(fontinfo.basefont)
        warnings.warn(msg)
        fontdict['BaseFont'] = Name(fontinfo.basefont)
        self.writeObject(fontdictObject, fontdict)
        return fontdictObject
    # We have a font file to embed - read it in and apply any effects
    t1font = type1font.Type1Font(fontinfo.fontfile)
    if fontinfo.effects:
        t1font = t1font.transform(fontinfo.effects)
    fontdict['BaseFont'] = Name(t1font.prop['FontName'])
    # Font descriptors may be shared between differently encoded
    # Type-1 fonts, so only create a new descriptor if there is no
    # existing descriptor for this font.
    effects = (fontinfo.effects.get('slant', 0.0),
               fontinfo.effects.get('extend', 1.0))
    fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
    if fontdesc is None:
        fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
        self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
    fontdict['FontDescriptor'] = fontdesc
    self.writeObject(fontdictObject, fontdict)
    return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
    # Create and write the font descriptor and the font file
    # of a Type-1 font
    fontdescObject = self.reserveObject('font descriptor')
    fontfileObject = self.reserveObject('font file')
    italic_angle = t1font.prop['ItalicAngle']
    fixed_pitch = t1font.prop['isFixedPitch']
    # Build the PDF font-descriptor Flags bit field.
    flags = 0
    # fixed width
    if fixed_pitch:
        flags |= 1 << 0
    # TODO: serif
    if 0:
        flags |= 1 << 1
    # TODO: symbolic (most TeX fonts are)
    if 1:
        flags |= 1 << 2
    # non-symbolic
    else:
        flags |= 1 << 5
    # italic
    if italic_angle:
        flags |= 1 << 6
    # TODO: all caps
    if 0:
        flags |= 1 << 16
    # TODO: small caps
    if 0:
        flags |= 1 << 17
    # TODO: force bold
    if 0:
        flags |= 1 << 18
    # Metrics (bbox, ascender, descender) come from FreeType, not the
    # Type-1 data itself.
    ft2font = get_font(fontfile)
    descriptor = {
        'Type': Name('FontDescriptor'),
        'FontName': Name(t1font.prop['FontName']),
        'Flags': flags,
        'FontBBox': ft2font.bbox,
        'ItalicAngle': italic_angle,
        'Ascent': ft2font.ascender,
        'Descent': ft2font.descender,
        'CapHeight': 1000,  # TODO: find this out
        'XHeight': 500,  # TODO: this one too
        'FontFile': fontfileObject,
        'FontFamily': t1font.prop['FamilyName'],
        'StemV': 50,  # TODO
        # (see also revision 3874; but not all TeX distros have AFM files!)
        # 'FontWeight': a number where 400 = Regular, 700 = Bold
        }
    self.writeObject(fontdescObject, descriptor)
    # Embed the two Type-1 segments (cleartext + encrypted); Length3 = 0
    # means the trailing zeros section is omitted.
    self.beginStream(fontfileObject.id, None,
                     {'Length1': len(t1font.parts[0]),
                      'Length2': len(t1font.parts[1]),
                      'Length3': 0})
    self.currentstream.write(t1font.parts[0])
    self.currentstream.write(t1font.parts[1])
    self.endStream()
    return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
# Template for a ToUnicode CMap stream mapping 2-byte character codes
# to Unicode code points via an identity mapping.  The two %-style
# placeholders receive the number of bfrange groups and the bfrange
# lines themselves (filled in by embedTTF's Type-42 path).
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
    """Embed the TTF font from the named file into the document.

    *characters* is the collection of character codes that were used.
    """
    font = get_font(filename)
    # 3 = subsetted Type 3 font, 42 = fully embedded Type 42 font
    # (dispatched on at the end of this function).
    fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
    "Convert font coordinates to PDF glyph coordinates"
    # PDF glyph space uses 1000 units per em.
    value = length / upe * 1000
    if nearest:
        return np.round(value)
    # Perhaps best to round away from zero for bounding
    # boxes and the like
    if value < 0:
        return floor(value)
    else:
        return ceil(value)
def embedTTFType3(font, characters, descriptor):
    """The Type 3-specific part of embedding a Truetype font"""
    widthsObject = self.reserveObject('font widths')
    fontdescObject = self.reserveObject('font descriptor')
    fontdictObject = self.reserveObject('font dictionary')
    charprocsObject = self.reserveObject('character procs')
    differencesArray = []
    firstchar, lastchar = 0, 255
    bbox = [cvt(x, nearest=False) for x in font.bbox]
    fontdict = {
        'Type': Name('Font'),
        'BaseFont': ps_name,
        'FirstChar': firstchar,
        'LastChar': lastchar,
        'FontDescriptor': fontdescObject,
        'Subtype': Name('Type3'),
        'Name': descriptor['FontName'],
        'FontBBox': bbox,
        'FontMatrix': [.001, 0, 0, .001, 0, 0],
        'CharProcs': charprocsObject,
        'Encoding': {
            'Type': Name('Encoding'),
            'Differences': differencesArray},
        'Widths': widthsObject
        }
    # Make the "Widths" array
    from encodings import cp1252
    # The "decoding_map" was changed
    # to a "decoding_table" as of Python 2.5.
    if hasattr(cp1252, 'decoding_map'):
        def decode_char(charcode):
            return cp1252.decoding_map[charcode] or 0
    else:
        def decode_char(charcode):
            return ord(cp1252.decoding_table[charcode])

    def get_char_width(charcode):
        # Width of one cp1252 code point, in PDF glyph units.
        s = decode_char(charcode)
        width = font.load_char(
            s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
        return cvt(width)
    widths = [get_char_width(charcode)
              for charcode in range(firstchar, lastchar+1)]
    descriptor['MaxWidth'] = max(widths)
    # Make the "Differences" array, sort the ccodes < 255 from
    # the multi-byte ccodes, and build the whole set of glyph ids
    # that we need from this font.
    glyph_ids = []
    differences = []
    multi_byte_chars = set()
    for c in characters:
        ccode = c
        gind = font.get_char_index(ccode)
        glyph_ids.append(gind)
        glyph_name = font.get_glyph_name(gind)
        if ccode <= 255:
            differences.append((ccode, glyph_name))
        else:
            multi_byte_chars.add(glyph_name)
    differences.sort()
    last_c = -2
    for c, name in differences:
        # Start a new run in the Differences array whenever the code
        # sequence is non-contiguous.
        if c != last_c + 1:
            differencesArray.append(c)
        differencesArray.append(Name(name))
        last_c = c
    # Make the charprocs array (using ttconv to generate the
    # actual outlines)
    rawcharprocs = ttconv.get_pdf_charprocs(
        filename.encode(sys.getfilesystemencoding()), glyph_ids)
    charprocs = {}
    for charname in sorted(rawcharprocs):
        stream = rawcharprocs[charname]
        charprocDict = {'Length': len(stream)}
        # The 2-byte characters are used as XObjects, so they
        # need extra info in their dictionary
        if charname in multi_byte_chars:
            charprocDict['Type'] = Name('XObject')
            charprocDict['Subtype'] = Name('Form')
            charprocDict['BBox'] = bbox
            # Each glyph includes bounding box information,
            # but xpdf and ghostscript can't handle it in a
            # Form XObject (they segfault!!!), so we remove it
            # from the stream here.  It's not needed anyway,
            # since the Form XObject includes it in its BBox
            # value.
            stream = stream[stream.find(b"d1") + 2:]
        charprocObject = self.reserveObject('charProc')
        self.beginStream(charprocObject.id, None, charprocDict)
        self.currentstream.write(stream)
        self.endStream()
        # Send the glyphs with ccode > 255 to the XObject dictionary,
        # and the others to the font itself
        if charname in multi_byte_chars:
            name = self._get_xobject_symbol_name(filename, charname)
            self.multi_byte_charprocs[name] = charprocObject
        else:
            charprocs[charname] = charprocObject
    # Write everything out
    self.writeObject(fontdictObject, fontdict)
    self.writeObject(fontdescObject, descriptor)
    self.writeObject(widthsObject, widths)
    self.writeObject(charprocsObject, charprocs)
    return fontdictObject
def embedTTFType42(font, characters, descriptor):
    """The Type 42-specific part of embedding a Truetype font.

    Writes the full font program as a CIDFontType2/Type0 pair, plus a
    CIDToGIDMap stream and a ToUnicode CMap, and returns a reference to
    the Type 0 font dictionary.
    """
    fontdescObject = self.reserveObject('font descriptor')
    cidFontDictObject = self.reserveObject('CID font dictionary')
    type0FontDictObject = self.reserveObject('Type 0 font dictionary')
    cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
    fontfileObject = self.reserveObject('font file stream')
    wObject = self.reserveObject('Type 0 widths')
    toUnicodeMapObject = self.reserveObject('ToUnicode map')
    cidFontDict = {
        'Type': Name('Font'),
        'Subtype': Name('CIDFontType2'),
        'BaseFont': ps_name,
        'CIDSystemInfo': {
            'Registry': 'Adobe',
            'Ordering': 'Identity',
            'Supplement': 0},
        'FontDescriptor': fontdescObject,
        'W': wObject,
        'CIDToGIDMap': cidToGidMapObject
        }
    type0FontDict = {
        'Type': Name('Font'),
        'Subtype': Name('Type0'),
        'BaseFont': ps_name,
        'Encoding': Name('Identity-H'),
        'DescendantFonts': [cidFontDictObject],
        'ToUnicode': toUnicodeMapObject
        }
    # Make fontfile stream
    descriptor['FontFile2'] = fontfileObject
    length1Object = self.reserveObject('decoded length of a font')
    self.beginStream(
        fontfileObject.id,
        self.reserveObject('length of font stream'),
        {'Length1': length1Object})
    # Copy the font file into the stream in chunks, counting the
    # uncompressed size for the Length1 entry.
    with open(filename, 'rb') as fontfile:
        length1 = 0
        while True:
            data = fontfile.read(4096)
            if not data:
                break
            length1 += len(data)
            self.currentstream.write(data)
    self.endStream()
    self.writeObject(length1Object, length1)
    # Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
    # at the same time
    cid_to_gid_map = ['\0'] * 65536
    widths = []
    max_ccode = 0
    for c in characters:
        ccode = c
        gind = font.get_char_index(ccode)
        glyph = font.load_char(ccode,
                               flags=LOAD_NO_SCALE | LOAD_NO_HINTING)
        widths.append((ccode, cvt(glyph.horiAdvance)))
        if ccode < 65536:
            cid_to_gid_map[ccode] = unichr(gind)
        max_ccode = max(ccode, max_ccode)
    widths.sort()
    cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
    last_ccode = -2
    w = []
    max_width = 0
    unicode_groups = []
    # Group consecutive character codes into runs for both the PDF 'W'
    # array and the ToUnicode bfrange entries.
    for ccode, width in widths:
        if ccode != last_ccode + 1:
            w.append(ccode)
            w.append([width])
            unicode_groups.append([ccode, ccode])
        else:
            w[-1].append(width)
            unicode_groups[-1][1] = ccode
        max_width = max(max_width, width)
        last_ccode = ccode
    unicode_bfrange = []
    for start, end in unicode_groups:
        unicode_bfrange.append(
            "<%04x> <%04x> [%s]" %
            (start, end,
             " ".join(["<%04x>" % x for x in range(start, end+1)])))
    unicode_cmap = (self._identityToUnicodeCMap %
                    (len(unicode_groups),
                     "\n".join(unicode_bfrange))).encode('ascii')
    # CIDToGIDMap stream
    cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
    self.beginStream(cidToGidMapObject.id,
                     None,
                     {'Length': len(cid_to_gid_map)})
    self.currentstream.write(cid_to_gid_map)
    self.endStream()
    # ToUnicode CMap.  The stream dictionary's Length must be the
    # integer byte count (it was previously the cmap bytes themselves,
    # which is invalid PDF and inconsistent with the CIDToGIDMap
    # stream above).
    self.beginStream(toUnicodeMapObject.id,
                     None,
                     {'Length': len(unicode_cmap)})
    self.currentstream.write(unicode_cmap)
    self.endStream()
    descriptor['MaxWidth'] = max_width
    # Write everything out
    self.writeObject(cidFontDictObject, cidFontDict)
    self.writeObject(type0FontDictObject, type0FontDict)
    self.writeObject(fontdescObject, descriptor)
    self.writeObject(wObject, w)
    return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
# Extract the PostScript name (name table entry 6) from either the
# Macintosh or the Microsoft naming scheme.
try:
    ps_name = sfnt[(1, 0, 0, 6)].decode('macroman')  # Macintosh scheme
except KeyError:
    # Microsoft scheme:
    ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
    # (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
ff = font.face_flags
sf = font.style_flags
# Build the PDF font-descriptor Flags bit field.
flags = 0
symbolic = False  # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH:
    flags |= 1 << 0
if 0:  # TODO: serif
    flags |= 1 << 1
if symbolic:
    flags |= 1 << 2
else:
    flags |= 1 << 5
if sf & ITALIC:
    flags |= 1 << 6
if 0:  # TODO: all caps
    flags |= 1 << 16
if 0:  # TODO: small caps
    flags |= 1 << 17
if 0:  # TODO: force bold
    flags |= 1 << 18
descriptor = {
    'Type': Name('FontDescriptor'),
    'FontName': ps_name,
    'Flags': flags,
    'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
    'Ascent': cvt(font.ascender, nearest=False),
    'Descent': cvt(font.descender, nearest=False),
    'CapHeight': cvt(pclt['capHeight'], nearest=False),
    'XHeight': cvt(pclt['xHeight']),
    'ItalicAngle': post['italicAngle'][1],  # ???
    'StemV': 0  # ???
    }
# The font subsetting to a Type 3 font does not work for
# OpenType (.otf) that embed a Postscript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
    fonttype = 42
    msg = ("'%s' can not be subsetted into a Type 3 font. "
           "The entire font will be embedded in the output.")
    warnings.warn(msg % os.path.basename(filename))
if fonttype == 3:
    return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
    return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
    """Return name of an ExtGState that sets alpha to the given value.

    *alpha* is a (stroke, fill) alpha pair; states are cached by value.
    """
    cached = self.alphaStates.get(alpha, None)
    if cached is not None:
        return cached[0]
    name = Name('A%d' % self.nextAlphaState)
    self.nextAlphaState += 1
    self.alphaStates[alpha] = (
        name,
        {'Type': Name('ExtGState'), 'CA': alpha[0], 'ca': alpha[1]})
    return name
def hatchPattern(self, hatch_style):
    """Return the cached pattern name for *hatch_style*, creating one
    on first use."""
    if hatch_style is not None:
        edge, face, hatch = hatch_style
        # The colors may come in as numpy arrays, which aren't hashable;
        # normalize them to tuples so the style can serve as a dict key.
        hatch_style = (tuple(edge) if edge is not None else None,
                       tuple(face) if face is not None else None,
                       hatch)
    existing = self.hatchPatterns.get(hatch_style, None)
    if existing is not None:
        return existing
    name = Name('H%d' % self.nextHatch)
    self.nextHatch += 1
    self.hatchPatterns[hatch_style] = name
    return name
def writeHatches(self):
    """Write a tiling-pattern stream for every registered hatch style
    and the dictionary that maps pattern names to them."""
    hatchDict = dict()
    sidelen = 72.0  # pattern tile is one inch square
    for hatch_style, name in six.iteritems(self.hatchPatterns):
        ob = self.reserveObject('hatch pattern')
        hatchDict[name] = ob
        res = {'Procsets':
               [Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
        self.beginStream(
            ob.id, None,
            {'Type': Name('Pattern'),
             'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
             'BBox': [0, 0, sidelen, sidelen],
             'XStep': sidelen, 'YStep': sidelen,
             'Resources': res,
             # Change origin to match Agg at top-left.
             'Matrix': [1, 0, 0, 1, 0, self.height * 72]})
        stroke_rgb, fill_rgb, path = hatch_style
        self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
                    Op.setrgb_stroke)
        if fill_rgb is not None:
            # Paint the tile background before stroking the hatch lines.
            self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
                        Op.setrgb_nonstroke,
                        0, 0, sidelen, sidelen, Op.rectangle,
                        Op.fill)
        self.output(rcParams['hatch.linewidth'], Op.setlinewidth)
        self.output(*self.pathOperations(
            Path.hatch(path),
            Affine2D().scale(sidelen),
            simplify=False))
        self.output(Op.fill_stroke)
        self.endStream()
    self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
    """Register a batch of Gouraud-shaded triangles and return the
    shading name assigned to it."""
    index = len(self.gouraudTriangles)
    name = Name('GT%d' % index)
    self.gouraudTriangles.append((name, points, colors))
    return name
def writeGouraudTriangles(self):
    """Write a ShadingType 4 stream for each registered triangle batch
    and the dictionary mapping shading names to them."""
    gouraudDict = dict()
    for name, points, colors in self.gouraudTriangles:
        ob = self.reserveObject('Gouraud triangle')
        gouraudDict[name] = ob
        shape = points.shape
        flat_points = points.reshape((shape[0] * shape[1], 2))
        flat_colors = colors.reshape((shape[0] * shape[1], 4))
        # Pad the Decode range slightly so boundary points do not land
        # exactly on the edge of the 32-bit coordinate encoding.
        points_min = np.min(flat_points, axis=0) - (1 << 8)
        points_max = np.max(flat_points, axis=0) + (1 << 8)
        # Scale factor mapping the padded range onto unsigned 32 bits.
        factor = float(0xffffffff) / (points_max - points_min)
        self.beginStream(
            ob.id, None,
            {'ShadingType': 4,
             'BitsPerCoordinate': 32,
             'BitsPerComponent': 8,
             'BitsPerFlag': 8,
             'ColorSpace': Name('DeviceRGB'),
             'AntiAlias': True,
             'Decode': [points_min[0], points_max[0],
                        points_min[1], points_max[1],
                        0, 1, 0, 1, 0, 1]
             })
        # Pack flag byte, two big-endian 32-bit coords and RGB bytes
        # per vertex, as the stream dictionary above declares.
        streamarr = np.empty(
            (shape[0] * shape[1],),
            dtype=[(str('flags'), str('u1')),
                   (str('points'), str('>u4'), (2,)),
                   (str('colors'), str('u1'), (3,))])
        streamarr['flags'] = 0
        streamarr['points'] = (flat_points - points_min) * factor
        streamarr['colors'] = flat_colors[:, :3] * 255.0
        self.write(streamarr.tostring())
        self.endStream()
    self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
    """Return name of an image XObject representing the given image."""
    cached = self._images.get(id(image), None)
    if cached is not None:
        return cached[1]
    name = Name('I%d' % self.nextImage)
    ob = self.reserveObject('image %d' % self.nextImage)
    self.nextImage += 1
    self._images[id(image)] = (image, name, ob)
    return name
def _unpack(self, im):
"""
Unpack the image object im into height, width, data, alpha,
where data and alpha are HxWx3 (RGB) or HxWx1 (grayscale or alpha)
arrays, except alpha is None if the image is fully opaque.
"""
h, w = im.shape[:2]
im = im[::-1]
if im.ndim == 2:
return h, w, im, None
else:
rgb = im[:, :, :3]
rgb = np.array(rgb, order='C')
# PDF needs a separate alpha image
if im.shape[2] == 4:
alpha = im[:, :, 3][..., None]
if np.all(alpha == 255):
alpha = None
else:
alpha = np.array(alpha, order='C')
else:
alpha = None
return h, w, rgb, alpha
def _writePng(self, data):
    """
    Write the image *data* into the pdf file using png
    predictors with Flate compression.

    Encodes *data* as a PNG in memory, then copies only the IDAT
    (compressed image data) chunk payloads into the current stream.
    """
    buffer = BytesIO()
    _png.write_png(data, buffer)
    buffer.seek(8)  # skip the 8-byte PNG signature
    header = bytearray(8)
    while True:
        # Each PNG chunk starts with a 4-byte length and 4-byte type.
        n = buffer.readinto(header)
        assert n == 8
        length, type = struct.unpack(b'!L4s', bytes(header))
        if type == b'IDAT':
            data = bytearray(length)
            n = buffer.readinto(data)
            assert n == length
            self.currentstream.write(bytes(data))
        elif type == b'IEND':
            break
        else:
            # Skip ancillary chunk payloads (pHYs, tEXt, ...).
            buffer.seek(length, 1)
        buffer.seek(4, 1)  # skip CRC
def _writeImg(self, data, height, width, grayscale, id, smask=None):
    """
    Write the image *data* of size *height* x *width*, as grayscale
    if *grayscale* is true and RGB otherwise, as pdf object *id*
    and with the soft mask (alpha channel) *smask*, which should be
    either None or a *height* x *width* x 1 array.
    """
    obj = {'Type': Name('XObject'),
           'Subtype': Name('Image'),
           'Width': width,
           'Height': height,
           'ColorSpace': Name('DeviceGray' if grayscale
                              else 'DeviceRGB'),
           'BitsPerComponent': 8}
    if smask:
        obj['SMask'] = smask
    if rcParams['pdf.compression']:
        # Predictor 10+ selects the PNG predictor family for Flate.
        png = {'Predictor': 10,
               'Colors': 1 if grayscale else 3,
               'Columns': width}
    else:
        png = None
    self.beginStream(
        id,
        self.reserveObject('length of image stream'),
        obj,
        png=png
        )
    if png:
        self._writePng(data)
    else:
        # Uncompressed: dump the raw sample bytes directly.
        self.currentstream.write(data.tostring())
    self.endStream()
def writeImages(self):
    """Write out every image XObject registered via imageObject()."""
    for img, name, ob in six.itervalues(self._images):
        height, width, data, adata = self._unpack(img)
        smaskObject = None
        if adata is not None:
            # The alpha channel becomes a separate grayscale soft mask.
            smaskObject = self.reserveObject("smask")
            self._writeImg(adata, height, width, True, smaskObject.id)
        self._writeImg(data, height, width, False, ob.id, smaskObject)
def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
                 capstyle):
    """Return name of a marker XObject representing the given path."""
    # self.markers used by markerObject, writeMarkers, close:
    # mapping from (path operations, fill?, stroke?) to
    #   [name, object reference, bounding box, linewidth]
    # This enables different draw_markers calls to share the XObject
    # if the gc is sufficiently similar: colors etc can vary, but
    # the choices of whether to fill and whether to stroke cannot.
    # We need a bounding box enclosing all of the XObject path,
    # but since line width may vary, we store the maximum of all
    # occurring line widths in self.markers.
    # close() is somewhat tightly coupled in that it expects the
    # first two components of each value in self.markers to be the
    # name and object reference.
    pathops = self.pathOperations(path, trans, simplify=False)
    key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
    result = self.markers.get(key)
    if result is None:
        # First sighting of this marker shape: reserve an XObject.
        name = Name('M%d' % len(self.markers))
        ob = self.reserveObject('marker %d' % len(self.markers))
        bbox = path.get_extents(trans)
        self.markers[key] = [name, ob, bbox, lw]
    else:
        # Reuse the existing XObject, widening its stored line width
        # so the eventual BBox padding covers every use.
        if result[-1] < lw:
            result[-1] = lw
        name = result[0]
    return name
def writeMarkers(self):
    """Write out the Form XObject for every marker registered via
    markerObject()."""
    for ((pathops, fill, stroke, joinstyle, capstyle),
         (name, ob, bbox, lw)) in six.iteritems(self.markers):
        # Pad the bounding box by half the widest line width used so
        # the stroke is not clipped by the XObject BBox.
        bbox = bbox.padded(lw * 0.5)
        self.beginStream(
            ob.id, None,
            {'Type': Name('XObject'), 'Subtype': Name('Form'),
             'BBox': list(bbox.extents)})
        self.output(GraphicsContextPdf.joinstyles[joinstyle],
                    Op.setlinejoin)
        self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
        self.output(*pathops)
        self.output(Op.paint_path(fill, stroke))
        self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
    """Register one path of a collection for later output as a Form
    XObject (see writePathCollectionTemplates); return its name."""
    index = len(self.paths)
    name = Name('P%d' % index)
    ob = self.reserveObject('path %d' % index)
    self.paths.append(
        (name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
         padding, filled, stroked))
    return name
def writePathCollectionTemplates(self):
    """Write a Form XObject for every path registered via
    pathCollectionObject()."""
    for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
         stroked) in self.paths:
        pathops = self.pathOperations(path, trans, simplify=False)
        bbox = path.get_extents(trans)
        if not np.all(np.isfinite(bbox.extents)):
            # Degenerate path (NaN/inf extents): emit an empty BBox.
            extents = [0, 0, 0, 0]
        else:
            # Pad so strokes at the edge are not clipped by the BBox.
            bbox = bbox.padded(padding)
            extents = list(bbox.extents)
        self.beginStream(
            ob.id, None,
            {'Type': Name('XObject'), 'Subtype': Name('Form'),
             'BBox': extents})
        self.output(GraphicsContextPdf.joinstyles[joinstyle],
                    Op.setlinejoin)
        self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
        self.output(*pathops)
        self.output(Op.paint_path(filled, stroked))
        self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
    """Return PDF drawing operators for *path*, wrapped in a Verbatim
    so they are emitted as-is by output()."""
    return [Verbatim(_path.convert_to_string(
        path, transform, clip, simplify, sketch,
        6,  # presumably the output precision in digits — see _path.convert_to_string
        [Op.moveto.op, Op.lineto.op, b'', Op.curveto.op, Op.closepath.op],
        True))]
def writePath(self, path, transform, clip=False, sketch=None):
    """Emit the PDF operators that draw *path* under *transform*.

    When *clip* is true the path is clipped to the page rectangle and
    allowed to simplify itself.
    """
    if clip:
        clip = (0.0, 0.0, self.width * 72, self.height * 72)
        simplify = path.should_simplify
    else:
        clip = None
        simplify = False
    self.output(*self.pathOperations(path, transform, clip,
                                     simplify=simplify, sketch=sketch))
def reserveObject(self, name=''):
    """Reserve an ID for an indirect object.

    The name is used for debugging in case we forget to print out
    the object with writeObject.
    """
    new_id = self.nextObject
    self.nextObject = new_id + 1
    # Offset stays None until the object is actually written.
    self.xrefTable.append([None, 0, name])
    return Reference(new_id)
def recordXref(self, id):
    """Record the current file offset of object *id* in the xref table."""
    offset = self.fh.tell() - self.tell_base
    self.xrefTable[id][0] = offset
def writeObject(self, object, contents):
    """Write indirect object *object* with *contents*, recording its
    byte offset for the xref table."""
    self.recordXref(object.id)
    object.write(contents, self)
def writeXref(self):
    """Write out the xref table, remembering its offset for the trailer."""
    self.startxref = self.fh.tell() - self.tell_base
    self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
    missing = False
    for objid, (offset, generation, name) in enumerate(self.xrefTable):
        if offset is None:
            # Object was reserved but never written out.
            print('No offset for object %d (%s)' % (objid, name),
                  file=sys.stderr)
            missing = True
        else:
            # Entry 0 is the conventional free-list head ('f' record).
            key = "f" if name == 'the zero object' else "n"
            entry = "%010d %05d %s \n" % (offset, generation, key)
            self.write(entry.encode('ascii'))
    if missing:
        raise AssertionError('Indirect object does not exist')
def writeInfoDict(self):
    """Write out the info dictionary, checking it for good form"""
    # Validators for the keys the PDF Info dictionary may contain.
    def is_string_like(x):
        return isinstance(x, six.string_types)

    def is_date(x):
        return isinstance(x, datetime)
    check_trapped = (lambda x: isinstance(x, Name) and
                     x.name in ('True', 'False', 'Unknown'))
    keywords = {'Title': is_string_like,
                'Author': is_string_like,
                'Subject': is_string_like,
                'Keywords': is_string_like,
                'Creator': is_string_like,
                'Producer': is_string_like,
                'CreationDate': is_date,
                'ModDate': is_date,
                'Trapped': check_trapped}
    # Warn (but still write) on unknown keys or ill-typed values.
    for k in self.infoDict:
        if k not in keywords:
            warnings.warn('Unknown infodict keyword: %s' % k)
        else:
            if not keywords[k](self.infoDict[k]):
                warnings.warn('Bad value for infodict keyword %s' % k)
    self.infoObject = self.reserveObject('info')
    self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
    """Write out the PDF trailer."""
    trailer = {'Size': self.nextObject,
               'Root': self.rootObject,
               'Info': self.infoObject}
    # Could add 'ID'
    self.write(b"trailer\n")
    self.write(pdfRepr(trailer))
    self.write(("\nstartxref\n%d\n%%%%EOF\n"
                % self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
    # Cache of parsed AFM font objects shared by all renderer instances;
    # maxdict(50) presumably caps it at 50 entries — see maxdict.
    afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi, height, width):
    # file: the PdfFile this renderer writes into.
    # image_dpi: resolution used when embedding raster images.
    # height/width: canvas size (units set by the caller — TODO confirm).
    RendererBase.__init__(self)
    self.height = height
    self.width = width
    self.file = file
    self.gc = self.new_gc()
    self.mathtext_parser = MathTextParser("Pdf")
    self.image_dpi = image_dpi
def finalize(self):
    """Emit whatever closing commands the graphics context produces."""
    self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
    """Bring the PDF graphics state in sync with *gc*, temporarily
    taking *fillcolor* into account when computing deltas."""
    orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
    gc._fillcolor = fillcolor
    orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
    if gc.get_rgb() is None:
        # it should not matter what color here
        # since linewidth should be 0
        # unless affected by global settings in rcParams
        # hence setting zero alpha just incase
        gc.set_foreground((0, 0, 0, 0), isRGBA=True)
    # Effective (stroke, fill) alphas depend on whether the gc forces
    # a single alpha and whether the fill color carries its own.
    if gc._forced_alpha:
        gc._effective_alphas = (gc._alpha, gc._alpha)
    elif fillcolor is None or len(fillcolor) < 4:
        gc._effective_alphas = (gc._rgb[3], 1.0)
    else:
        gc._effective_alphas = (gc._rgb[3], fillcolor[3])
    # Emit only the commands that differ from the current state.
    delta = self.gc.delta(gc)
    if delta:
        self.file.output(*delta)
    # Restore gc to avoid unwanted side effects
    gc._fillcolor = orig_fill
    gc._effective_alphas = orig_alphas
def track_characters(self, font, s):
    """Keeps track of which characters are required from
    each font."""
    if isinstance(font, six.string_types):
        fname = font
    else:
        fname = font.fname
    realpath, stat_key = get_realpath_and_stat(fname)
    used = self.file.used_characters.setdefault(
        stat_key, (realpath, set()))
    used[1].update(ord(ch) for ch in s)
def merge_used_characters(self, other):
    """Fold another used-characters mapping into this file's records."""
    target = self.file.used_characters
    for stat_key, (realpath, charset) in six.iteritems(other):
        target.setdefault(stat_key, (realpath, set()))[1].update(charset)
def get_image_magnification(self):
    """Ratio of the image resolution to the 72 dpi of PDF user space."""
    dpi = self.image_dpi
    return dpi / 72.0
def option_scale_image(self):
    """
    pdf backend support arbitrary scaling of image.
    """
    # Vector output can scale embedded images losslessly.
    return True
def option_image_nocomposite(self):
    """
    return whether to generate a composite image from multiple images on
    a set of axes
    """
    composite = rcParams['image.composite_image']
    return not composite
def draw_image(self, gc, x, y, im, transform=None):
    """Draw the image array *im* at position (x, y), optionally under
    an affine *transform*."""
    h, w = im.shape[:2]
    if w == 0 or h == 0:
        return
    if transform is None:
        # If there's no transform, alpha has already been applied
        gc.set_alpha(1.0)
    self.check_gc(gc)
    # Convert pixel size to PDF points at the configured image dpi.
    w = 72.0 * w / self.image_dpi
    h = 72.0 * h / self.image_dpi
    imob = self.file.imageObject(im)
    if transform is None:
        self.file.output(Op.gsave,
                         w, 0, 0, h, x, y, Op.concat_matrix,
                         imob, Op.use_xobject, Op.grestore)
    else:
        # Apply the translation first, then the caller's transform.
        tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
        self.file.output(Op.gsave,
                         1, 0, 0, 1, x, y, Op.concat_matrix,
                         tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
                         imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
    """Draw *path* under *transform*, filled with *rgbFace* if given."""
    self.check_gc(gc, rgbFace)
    # Third argument is writePath's clip flag: clip to the page only
    # when the path is neither filled nor hatched.
    self.file.writePath(
        path, transform,
        rgbFace is None and gc.get_hatch_path() is None,
        gc.get_sketch_params())
    self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                         offsets, offsetTrans, facecolors, edgecolors,
                         linewidths, linestyles, antialiaseds, urls,
                         offset_position):
    """Draw a collection of paths, sharing Form XObjects between uses
    when that is both valid and smaller than inlining each path."""
    # We can only reuse the objects if the presence of fill and
    # stroke (and the amount of alpha for each) is the same for
    # all of them
    can_do_optimization = True
    facecolors = np.asarray(facecolors)
    edgecolors = np.asarray(edgecolors)
    if not len(facecolors):
        filled = False
        can_do_optimization = not gc.get_hatch()
    else:
        if np.all(facecolors[:, 3] == facecolors[0, 3]):
            filled = facecolors[0, 3] != 0.0
        else:
            can_do_optimization = False
    if not len(edgecolors):
        stroked = False
    else:
        if np.all(np.asarray(linewidths) == 0.0):
            stroked = False
        elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
            stroked = edgecolors[0, 3] != 0.0
        else:
            can_do_optimization = False
    # Is the optimization worth it? Rough calculation:
    # cost of emitting a path in-line is len_path * uses_per_path
    # cost of XObject is len_path + 5 for the definition,
    #    uses_per_path for the uses
    len_path = len(paths[0].vertices) if len(paths) > 0 else 0
    uses_per_path = self._iter_collection_uses_per_path(
        paths, all_transforms, offsets, facecolors, edgecolors)
    should_do_optimization = \
        len_path + uses_per_path + 5 < len_path * uses_per_path
    if (not can_do_optimization) or (not should_do_optimization):
        # Fall back to drawing every path inline.
        return RendererBase.draw_path_collection(
            self, gc, master_transform, paths, all_transforms,
            offsets, offsetTrans, facecolors, edgecolors,
            linewidths, linestyles, antialiaseds, urls,
            offset_position)
    # Register each distinct (path, transform) pair as an XObject.
    padding = np.max(linewidths)
    path_codes = []
    for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
            master_transform, paths, all_transforms)):
        name = self.file.pathCollectionObject(
            gc, path, transform, padding, filled, stroked)
        path_codes.append(name)
    output = self.file.output
    output(*self.gc.push())
    lastx, lasty = 0, 0
    # Place each use with a relative translation from the previous one.
    for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
            gc, master_transform, all_transforms, path_codes, offsets,
            offsetTrans, facecolors, edgecolors, linewidths, linestyles,
            antialiaseds, urls, offset_position):
        self.check_gc(gc0, rgbFace)
        dx, dy = xo - lastx, yo - lasty
        output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
               Op.use_xobject)
        lastx, lasty = xo, yo
    output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
                 rgbFace=None):
    """Draw *marker_path* at each vertex of *path*, reusing a shared
    marker XObject when that is smaller than inlining the marker."""
    # Same logic as in draw_path_collection
    len_marker_path = len(marker_path)
    uses = len(path)
    if len_marker_path * uses < len_marker_path + uses + 5:
        # Too few uses for the XObject to pay off: draw inline.
        RendererBase.draw_markers(self, gc, marker_path, marker_trans,
                                  path, trans, rgbFace)
        return
    self.check_gc(gc, rgbFace)
    fill = gc.fill(rgbFace)
    stroke = gc.stroke()
    output = self.file.output
    marker = self.file.markerObject(
        marker_path, marker_trans, fill, stroke, self.gc._linewidth,
        gc.get_joinstyle(), gc.get_capstyle())
    output(Op.gsave)
    lastx, lasty = 0, 0
    for vertices, code in path.iter_segments(
            trans,
            clip=(0, 0, self.file.width*72, self.file.height*72),
            simplify=False):
        if len(vertices):
            x, y = vertices[-2:]
            # Skip markers whose anchor falls outside the page.
            if (x < 0 or y < 0 or
                    x > self.file.width * 72 or y > self.file.height * 72):
                continue
            # Translate relative to the previous marker position.
            dx, dy = x - lastx, y - lasty
            output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
                   marker, Op.use_xobject)
            lastx, lasty = x, y
    output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
    """Draw one Gouraud-shaded triangle.

    Delegates to the batched ``draw_gouraud_triangles`` with a
    one-element batch.
    """
    batched_points = points.reshape((1, 3, 2))
    batched_colors = colors.reshape((1, 3, 4))
    self.draw_gouraud_triangles(gc, batched_points, batched_colors, trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
    """Render a batch of Gouraud-shaded triangles as a PDF shading.

    *points* has shape (n, 3, 2) and *colors* has shape (n, 3, 4).
    """
    assert len(points) == len(colors)
    assert points.ndim == 3 and points.shape[1] == 3 and points.shape[2] == 2
    assert colors.ndim == 3 and colors.shape[1] == 3 and colors.shape[2] == 4

    original_shape = points.shape
    # Transform every vertex in a single call, then restore the
    # (n, 3, 2) batch layout.
    flat = points.reshape((original_shape[0] * original_shape[1], 2))
    transformed = trans.transform(flat).reshape(original_shape)

    shading_name = self.file.addGouraudTriangles(transformed, colors)
    self.check_gc(gc)
    self.file.output(shading_name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
    """Emit PDF operators that move the text cursor to (x, y).

    When neither the new nor the previous angle is rotated, a cheap
    relative text-position move suffices; otherwise a full text matrix
    carrying the rotation is written, followed by a zero move.
    """
    if angle == 0 and oldangle == 0:
        self.file.output(x - oldx, y - oldy, Op.textpos)
        return
    theta = angle / 180.0 * pi
    cos_t, sin_t = cos(theta), sin(theta)
    self.file.output(cos_t, sin_t, -sin_t, cos_t, x, y, Op.textmatrix)
    self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
    """Render the mathtext expression *s* at (x, y), rotated *angle*
    degrees.

    Glyphs with codes <= 255 (or any code, for Type 42 fonts) are shown
    with regular text operators; with Type 3 fonts, glyphs above 255 are
    rendered in a second pass as XObjects, since Type 3 fonts cannot
    address multi-byte character codes.
    """
    # TODO: fix positioning and encoding
    width, height, descent, glyphs, rects, used_characters = \
        self.mathtext_parser.parse(s, 72, prop)
    self.merge_used_characters(used_characters)

    # When using Type 3 fonts, we can't use character codes higher
    # than 255, so we use the "Do" command to render those
    # instead.
    global_fonttype = rcParams['pdf.fonttype']

    # Set up a global transformation matrix for the whole math expression
    a = angle / 180.0 * pi
    self.file.output(Op.gsave)
    self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
                     Op.concat_matrix)

    self.check_gc(gc, gc._rgb)
    self.file.output(Op.begin_text)
    prev_font = None, None
    oldx, oldy = 0, 0
    # First pass: everything representable as regular shown text.
    for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
        if is_opentype_cff_font(fontname):
            fonttype = 42
        else:
            fonttype = global_fonttype
        if fonttype == 42 or num <= 255:
            self._setup_textpos(ox, oy, 0, oldx, oldy)
            oldx, oldy = ox, oy
            # Only emit a selectfont when the font actually changes.
            if (fontname, fontsize) != prev_font:
                self.file.output(self.file.fontName(fontname), fontsize,
                                 Op.selectfont)
                prev_font = fontname, fontsize
            self.file.output(self.encode_string(unichr(num), fonttype),
                             Op.show)
    self.file.output(Op.end_text)

    # If using Type 3 fonts, render all of the multi-byte characters
    # as XObjects using the 'Do' command.
    if global_fonttype == 3:
        for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
            if is_opentype_cff_font(fontname):
                fonttype = 42
            else:
                fonttype = global_fonttype
            if fonttype == 3 and num > 255:
                # Registers the font with the file even though the glyph
                # itself is drawn as an XObject.
                self.file.fontName(fontname)
                # Glyph outlines are in 1/1000 em units, hence the
                # 0.001 * fontsize scaling.
                self.file.output(Op.gsave,
                                 0.001 * fontsize, 0,
                                 0, 0.001 * fontsize,
                                 ox, oy, Op.concat_matrix)
                name = self.file._get_xobject_symbol_name(
                    fontname, symbol_name)
                self.file.output(Name(name), Op.use_xobject)
                self.file.output(Op.grestore)

    # Draw any horizontal lines in the math layout
    for ox, oy, width, height in rects:
        self.file.output(Op.gsave, ox, oy, width, height,
                         Op.rectangle, Op.fill, Op.grestore)

    # Pop off the global transformation
    self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
    """Render the TeX string *s* at (x, y), rotated *angle* degrees.

    Runs *s* through the TeX manager to obtain a DVI file, then replays
    the DVI page as PDF text-showing operators (merging adjacent glyphs
    into strings with explicit kerns) and box-drawing paths.
    """
    texmanager = self.get_texmanager()
    fontsize = prop.get_size_in_points()
    dvifile = texmanager.make_dvi(s, fontsize)
    with dviread.Dvi(dvifile, 72) as dvi:
        page = next(iter(dvi))

    # Gather font information and do some setup for combining
    # characters into strings. The variable seq will contain a
    # sequence of font and text entries. A font entry is a list
    # ['font', name, size] where name is a Name object for the
    # font. A text entry is ['text', x, y, glyphs, x+w] where x
    # and y are the starting coordinates, w is the width, and
    # glyphs is a list; in this phase it will always contain just
    # one one-character string, but later it may have longer
    # strings interspersed with kern amounts.
    oldfont, seq = None, []
    for x1, y1, dvifont, glyph, width in page.text:
        if dvifont != oldfont:
            pdfname = self.file.dviFontName(dvifont)
            seq += [['font', pdfname, dvifont.size]]
            oldfont = dvifont
        # We need to convert the glyph numbers to bytes, and the easiest
        # way to do this on both Python 2 and 3 is .encode('latin-1')
        seq += [['text', x1, y1,
                 [six.unichr(glyph).encode('latin-1')], x1+width]]

    # Find consecutive text strings with constant y coordinate and
    # combine into a sequence of strings and kerns, or just one
    # string (if any kerns would be less than 0.1 points).
    i, curx, fontsize = 0, 0, None
    while i < len(seq)-1:
        elt, nxt = seq[i:i+2]
        if elt[0] == 'font':
            fontsize = elt[2]
        elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
            offset = elt[4] - nxt[1]
            if abs(offset) < 0.1:
                # Effectively adjacent: fuse into a single string.
                elt[3][-1] += nxt[3][0]
                elt[4] += nxt[4]-nxt[1]
            else:
                # Record an explicit kern (thousandths of an em) between
                # the two strings.
                elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
                elt[4] = nxt[4]
            del seq[i+1]
            continue
        i += 1

    # Create a transform to map the dvi contents to the canvas.
    mytrans = Affine2D().rotate_deg(angle).translate(x, y)

    # Output the text.
    self.check_gc(gc, gc._rgb)
    self.file.output(Op.begin_text)
    curx, cury, oldx, oldy = 0, 0, 0, 0
    for elt in seq:
        if elt[0] == 'font':
            self.file.output(elt[1], elt[2], Op.selectfont)
        elif elt[0] == 'text':
            curx, cury = mytrans.transform_point((elt[1], elt[2]))
            self._setup_textpos(curx, cury, angle, oldx, oldy)
            oldx, oldy = curx, cury
            if len(elt[3]) == 1:
                self.file.output(elt[3][0], Op.show)
            else:
                self.file.output(elt[3], Op.showkern)
        else:
            # seq only ever contains 'font' and 'text' entries.
            assert False
    self.file.output(Op.end_text)

    # Then output the boxes (e.g., variable-length lines of square
    # roots).
    boxgc = self.new_gc()
    boxgc.copy_properties(gc)
    boxgc.set_linewidth(0)
    pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
               Path.CLOSEPOLY]
    for x1, y1, h, w in page.boxes:
        path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
                     [0, 0]], pathops)
        self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
    """Encode the text *s* as bytes for embedding in the PDF stream.

    Type 1 and Type 3 fonts are limited to single-byte character codes,
    so they use cp1252; all other font types are written as UTF-16BE.
    Unencodable characters are replaced rather than raising.
    """
    single_byte = fonttype in (1, 3)
    codec = 'cp1252' if single_byte else 'utf-16be'
    return s.encode(codec, 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
    """Draw the text *s* at (x, y), rotated *angle* degrees.

    Mathtext is delegated to draw_mathtext. Plain text is output
    either with one simple Tj command or, for Type 3 fonts containing
    characters above code 255, with the "woven" method described below.
    """
    # TODO: combine consecutive texts into one BT/ET delimited section

    # This function is rather complex, since there is no way to
    # access characters of a Type 3 font with codes > 255.  (Type
    # 3 fonts can not have a CIDMap).  Therefore, we break the
    # string into chunks, where each chunk contains exclusively
    # 1-byte or exclusively 2-byte characters, and output each
    # chunk a separate command.  1-byte characters use the regular
    # text show command (Tj), whereas 2-byte characters use the
    # use XObject command (Do).  If using Type 42 fonts, all of
    # this complication is avoided, but of course, those fonts can
    # not be subsetted.

    self.check_gc(gc, gc._rgb)
    if ismath:
        return self.draw_mathtext(gc, x, y, s, prop, angle)

    fontsize = prop.get_size_in_points()

    if rcParams['pdf.use14corefonts']:
        font = self._get_font_afm(prop)
        l, b, w, h = font.get_str_bbox(s)
        fonttype = 1
    else:
        font = self._get_font_ttf(prop)
        self.track_characters(font, s)
        font.set_text(s, 0.0, flags=LOAD_NO_HINTING)

        fonttype = rcParams['pdf.fonttype']

        # We can't subset all OpenType fonts, so switch to Type 42
        # in that case.
        if is_opentype_cff_font(font.fname):
            fonttype = 42

    def check_simple_method(s):
        """Determine if we should use the simple or woven method
        to output this text, and chunks the string into 1-byte and
        2-byte sections if necessary."""
        use_simple_method = True
        chunks = []

        if not rcParams['pdf.use14corefonts']:
            if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
                # Break the string into chunks where each chunk is either
                # a string of chars <= 255, or a single character > 255.
                s = six.text_type(s)
                for c in s:
                    if ord(c) <= 255:
                        char_type = 1
                    else:
                        char_type = 2
                    if len(chunks) and chunks[-1][0] == char_type:
                        chunks[-1][1].append(c)
                    else:
                        chunks.append((char_type, [c]))
                # Simple output only works when everything fits in one
                # single-byte chunk.
                use_simple_method = (len(chunks) == 1 and
                                     chunks[-1][0] == 1)
        return use_simple_method, chunks

    def draw_text_simple():
        """Outputs text using the simple method."""
        self.file.output(Op.begin_text,
                         self.file.fontName(prop),
                         fontsize,
                         Op.selectfont)
        self._setup_textpos(x, y, angle)
        self.file.output(self.encode_string(s, fonttype), Op.show,
                         Op.end_text)

    def draw_text_woven(chunks):
        """Outputs text using the woven method, alternating
        between chunks of 1-byte characters and 2-byte characters.
        Only used for Type 3 fonts."""
        chunks = [(a, ''.join(b)) for a, b in chunks]

        # Do the rotation and global translation as a single matrix
        # concatenation up front
        self.file.output(Op.gsave)
        a = angle / 180.0 * pi
        self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
                         Op.concat_matrix)

        # Output all the 1-byte characters in a BT/ET group, then
        # output all the 2-byte characters.
        for mode in (1, 2):
            newx = oldx = 0
            # Output a 1-byte character chunk
            if mode == 1:
                self.file.output(Op.begin_text,
                                 self.file.fontName(prop),
                                 fontsize,
                                 Op.selectfont)

            for chunk_type, chunk in chunks:
                if mode == 1 and chunk_type == 1:
                    self._setup_textpos(newx, 0, 0, oldx, 0, 0)
                    self.file.output(self.encode_string(chunk, fonttype),
                                     Op.show)
                    oldx = newx

                # Advance the pen over the chunk in both modes so that
                # newx stays consistent across the two passes.
                lastgind = None
                for c in chunk:
                    ccode = ord(c)
                    gind = font.get_char_index(ccode)
                    if gind is not None:
                        if mode == 2 and chunk_type == 2:
                            glyph_name = font.get_glyph_name(gind)
                            self.file.output(Op.gsave)
                            # Glyph outlines are in 1/1000 em units.
                            self.file.output(0.001 * fontsize, 0,
                                             0, 0.001 * fontsize,
                                             newx, 0, Op.concat_matrix)
                            name = self.file._get_xobject_symbol_name(
                                font.fname, glyph_name)
                            self.file.output(Name(name), Op.use_xobject)
                            self.file.output(Op.grestore)

                        # Move the pointer based on the character width
                        # and kerning
                        glyph = font.load_char(ccode,
                                               flags=LOAD_NO_HINTING)
                        if lastgind is not None:
                            kern = font.get_kerning(
                                lastgind, gind, KERNING_UNFITTED)
                        else:
                            kern = 0
                        lastgind = gind
                        newx += kern/64.0 + glyph.linearHoriAdvance/65536.0

            if mode == 1:
                self.file.output(Op.end_text)

        self.file.output(Op.grestore)

    use_simple_method, chunks = check_simple_method(s)
    if use_simple_method:
        return draw_text_simple()
    else:
        return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
    """Return (width, height, descent) of the string *s*, in PDF points."""
    if rcParams['text.usetex']:
        # usetex: metrics come straight from the TeX manager.
        size = prop.get_size_in_points()
        return self.get_texmanager().get_text_width_height_descent(
            s, size, renderer=self)
    if ismath:
        w, h, d, _glyphs, _rects, _used = self.mathtext_parser.parse(
            s, 72, prop)
    elif rcParams['pdf.use14corefonts']:
        # AFM metrics are expressed in 1/1000 em units.
        afm_font = self._get_font_afm(prop)
        _l, _b, w, h, d = afm_font.get_str_bbox_and_descent(s)
        scale = prop.get_size_in_points()
        w *= scale / 1000
        h *= scale / 1000
        d *= scale / 1000
    else:
        # FreeType metrics are expressed in 1/64 pixel units.
        ttf_font = self._get_font_ttf(prop)
        ttf_font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
        w, h = ttf_font.get_width_height()
        d = ttf_font.get_descent()
        scale = (1.0 / 64.0)
        w *= scale
        h *= scale
        d *= scale
    return w, h, d
def _get_font_afm(self, prop):
    """Return an AFM font object for *prop*.

    Memoized both by the property hash and by the resolved file name,
    so every AFM file is parsed at most once.
    """
    prop_key = hash(prop)
    cached = self.afm_font_cache.get(prop_key)
    if cached is not None:
        return cached
    directory = self.file._core14fontdir
    filename = findfont(prop, fontext='afm', directory=directory)
    if filename is None:
        # Fall back to Helvetica when the property cannot be matched.
        filename = findfont("Helvetica", fontext='afm',
                            directory=directory)
    cached = self.afm_font_cache.get(filename)
    if cached is None:
        with open(filename, 'rb') as fh:
            cached = AFM(fh)
        self.afm_font_cache[filename] = cached
    self.afm_font_cache[prop_key] = cached
    return cached
def _get_font_ttf(self, prop):
    """Return a FreeType font matching *prop*, reset and sized at 72 dpi."""
    font = get_font(findfont(prop))
    font.clear()
    font.set_size(prop.get_size_in_points(), 72)
    return font
def flipy(self):
    """PDF uses a y-up coordinate system, so no y-flip is needed."""
    return False
def get_canvas_width_height(self):
    """Return the canvas size in PDF points (the file stores inches)."""
    points_per_inch = 72.0
    return (self.file.width * points_per_inch,
            self.file.height * points_per_inch)
def new_gc(self):
    """Create a fresh graphics context bound to this renderer's file."""
    context = GraphicsContextPdf(self.file)
    return context
class GraphicsContextPdf(GraphicsContextBase):
    """Graphics context that mirrors the PDF graphics state.

    Instances form a stack (via ``parent``) paralleling the PDF q/Q
    save/restore nesting; ``delta`` emits the operator sequence needed
    to turn one state into another.
    """

    def __init__(self, file):
        GraphicsContextBase.__init__(self)
        # Current nonstroking (fill) color; default black.
        self._fillcolor = (0.0, 0.0, 0.0)
        # Alpha pair currently in effect (installed via an ExtGState).
        self._effective_alphas = (1.0, 1.0)
        self.file = file
        # State saved by the enclosing push(); None at the top level.
        self.parent = None

    def __repr__(self):
        # Omit the file and parent back-references to keep repr readable.
        d = dict(self.__dict__)
        del d['file']
        del d['parent']
        return repr(d)

    def stroke(self):
        """
        Predicate: does the path need to be stroked (its outline drawn)?
        This tests for the various conditions that disable stroking
        the path, in which case it would presumably be filled.
        """
        # _linewidth > 0: in pdf a line of width 0 is drawn at minimum
        # possible device width, but e.g., agg doesn't draw at all
        return (self._linewidth > 0 and self._alpha > 0 and
                (len(self._rgb) <= 3 or self._rgb[3] != 0.0))

    def fill(self, *args):
        """
        Predicate: does the path need to be filled?
        An optional argument can be used to specify an alternative
        _fillcolor, as needed by RendererPdf.draw_markers.
        """
        if len(args):
            _fillcolor = args[0]
        else:
            _fillcolor = self._fillcolor
        return (self._hatch or
                (_fillcolor is not None and
                 (len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))

    def paint(self):
        """
        Return the appropriate pdf operator to cause the path to be
        stroked, filled, or both.
        """
        return Op.paint_path(self.fill(), self.stroke())

    # PDF numeric codes for the line cap and join styles.
    capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
    joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}

    def capstyle_cmd(self, style):
        """Operators selecting the line cap style."""
        return [self.capstyles[style], Op.setlinecap]

    def joinstyle_cmd(self, style):
        """Operators selecting the line join style."""
        return [self.joinstyles[style], Op.setlinejoin]

    def linewidth_cmd(self, width):
        """Operators selecting the stroke width."""
        return [width, Op.setlinewidth]

    def dash_cmd(self, dashes):
        """Operators selecting the dash pattern (solid when dash is None)."""
        offset, dash = dashes
        if dash is None:
            dash = []
            offset = 0
        return [list(dash), offset, Op.setdash]

    def alpha_cmd(self, alpha, forced, effective_alphas):
        """Operators installing an ExtGState carrying the alpha values."""
        name = self.file.alphaState(effective_alphas)
        return [name, Op.setgstate]

    def hatch_cmd(self, hatch, hatch_color):
        """Operators selecting either a hatch pattern or a plain fill."""
        if not hatch:
            if self._fillcolor is not None:
                return self.fillcolor_cmd(self._fillcolor)
            else:
                return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
        else:
            hatch_style = (hatch_color, self._fillcolor, hatch)
            name = self.file.hatchPattern(hatch_style)
            return [Name('Pattern'), Op.setcolorspace_nonstroke,
                    name, Op.setcolor_nonstroke]

    def rgb_cmd(self, rgb):
        """Operators selecting the stroke color (gray fast path if r==g==b)."""
        if rcParams['pdf.inheritcolor']:
            return []
        if rgb[0] == rgb[1] == rgb[2]:
            return [rgb[0], Op.setgray_stroke]
        else:
            return list(rgb[:3]) + [Op.setrgb_stroke]

    def fillcolor_cmd(self, rgb):
        """Operators selecting the fill color (gray fast path if r==g==b)."""
        if rgb is None or rcParams['pdf.inheritcolor']:
            return []
        elif rgb[0] == rgb[1] == rgb[2]:
            return [rgb[0], Op.setgray_nonstroke]
        else:
            return list(rgb[:3]) + [Op.setrgb_nonstroke]

    def push(self):
        """Save the current state on the parent stack; emit a q operator."""
        parent = GraphicsContextPdf(self.file)
        parent.copy_properties(self)
        parent.parent = self.parent
        self.parent = parent
        return [Op.gsave]

    def pop(self):
        """Restore the most recently pushed state; emit a Q operator."""
        assert self.parent is not None
        self.copy_properties(self.parent)
        self.parent = self.parent.parent
        return [Op.grestore]

    def clip_cmd(self, cliprect, clippath):
        """Set clip rectangle. Calls self.pop() and self.push()."""
        cmds = []
        # Pop graphics state until we hit the right one or the stack is empty
        while ((self._cliprect, self._clippath) != (cliprect, clippath)
                and self.parent is not None):
            cmds.extend(self.pop())
        # Unless we hit the right one, set the clip polygon
        if ((self._cliprect, self._clippath) != (cliprect, clippath) or
                self.parent is None):
            cmds.extend(self.push())
            if self._cliprect != cliprect:
                cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
            if self._clippath != clippath:
                path, affine = clippath.get_transformed_path_and_affine()
                cmds.extend(
                    PdfFile.pathOperations(path, affine, simplify=False) +
                    [Op.clip, Op.endpath])
        return cmds

    # (state attribute names, emitting method) pairs, applied in order
    # by delta() below.
    commands = (
        # must come first since may pop
        (('_cliprect', '_clippath'), clip_cmd),
        (('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
        (('_capstyle',), capstyle_cmd),
        (('_fillcolor',), fillcolor_cmd),
        (('_joinstyle',), joinstyle_cmd),
        (('_linewidth',), linewidth_cmd),
        (('_dashes',), dash_cmd),
        (('_rgb',), rgb_cmd),
        # must come after fillcolor and rgb
        (('_hatch', '_hatch_color'), hatch_cmd),
    )

    def delta(self, other):
        """
        Copy properties of other into self and return PDF commands
        needed to transform self into other.
        """
        cmds = []
        fill_performed = False
        for params, cmd in self.commands:
            different = False
            for p in params:
                ours = getattr(self, p)
                theirs = getattr(other, p)
                try:
                    if (ours is None or theirs is None):
                        different = bool(not(ours is theirs))
                    else:
                        different = bool(ours != theirs)
                except ValueError:
                    # Array-valued attributes do not support a scalar
                    # comparison; compare shape and contents instead.
                    ours = np.asarray(ours)
                    theirs = np.asarray(theirs)
                    different = (ours.shape != theirs.shape or
                                 np.any(ours != theirs))
                if different:
                    break

            # Need to update hatching if we also updated fillcolor
            if params == ('_hatch', '_hatch_color') and fill_performed:
                different = True

            if different:
                if params == ('_fillcolor',):
                    fill_performed = True
                theirs = [getattr(other, p) for p in params]
                cmds.extend(cmd(self, *theirs))
                for p in params:
                    setattr(self, p, getattr(other, p))
        return cmds

    def copy_properties(self, other):
        """
        Copy properties of other into self.
        """
        GraphicsContextBase.copy_properties(self, other)
        # These two attributes are specific to this subclass; fall back
        # to our own values when *other* does not carry them.
        fillcolor = getattr(other, '_fillcolor', self._fillcolor)
        effective_alphas = getattr(other, '_effective_alphas',
                                   self._effective_alphas)
        self._fillcolor = fillcolor
        self._effective_alphas = effective_alphas

    def finalize(self):
        """
        Make sure every pushed graphics state is popped.
        """
        cmds = []
        while self.parent is not None:
            cmds.extend(self.pop())
        return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class PdfPages(object):
    """
    A multi-page PDF file.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> # Initialize:
    >>> with PdfPages('foo.pdf') as pdf:
    ...     # As many times as you like, create a figure fig and save it:
    ...     fig = plt.figure()
    ...     pdf.savefig(fig)
    ...     # When no figure is specified the current figure is saved
    ...     pdf.savefig()

    Notes
    -----
    In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
    order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
    forgetting the format argument.
    """
    __slots__ = ('_file', 'keep_empty')

    def __init__(self, filename, keep_empty=True, metadata=None):
        """
        Create a new PdfPages object.

        Parameters
        ----------
        filename : str
            Plots using :meth:`PdfPages.savefig` will be written to a file at
            this location. The file is opened at once and any older file with
            the same name is overwritten.
        keep_empty : bool, optional
            If set to False, then empty pdf files will be deleted automatically
            when closed.
        metadata : dictionary, optional
            Information dictionary object (see PDF reference section 10.2.1
            'Document Information Dictionary'), e.g.:
            `{'Creator': 'My software', 'Author': 'Me',
            'Title': 'Awesome fig'}`

            The standard keys are `'Title'`, `'Author'`, `'Subject'`,
            `'Keywords'`, `'Creator'`, `'Producer'`, `'CreationDate'`,
            `'ModDate'`, and `'Trapped'`. Values have been predefined
            for `'Creator'`, `'Producer'` and `'CreationDate'`. They
            can be removed by setting them to `None`.
        """
        self._file = PdfFile(filename, metadata=metadata)
        self.keep_empty = keep_empty

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always close on context exit, even when an exception occurred.
        self.close()

    def close(self):
        """
        Finalize this object, making the underlying file a complete
        PDF file.
        """
        self._file.finalize()
        self._file.close()
        # Optionally remove a file that never received a page -- but only
        # if we opened it ourselves (not a caller-supplied file object).
        if (self.get_pagecount() == 0 and not self.keep_empty and
                not self._file.passed_in_file_object):
            os.remove(self._file.fh.name)
        self._file = None

    def infodict(self):
        """
        Return a modifiable information dictionary object
        (see PDF reference section 10.2.1 'Document Information
        Dictionary').
        """
        return self._file.infoDict

    def savefig(self, figure=None, **kwargs):
        """
        Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.

        Any other keyword arguments are passed to
        :meth:`~matplotlib.figure.Figure.savefig`.

        Parameters
        ----------
        figure : :class:`~matplotlib.figure.Figure` or int, optional
            Specifies what figure is saved to file. If not specified, the
            active figure is saved. If a :class:`~matplotlib.figure.Figure`
            instance is provided, this figure is saved. If an int is specified,
            the figure instance to save is looked up by number.
        """
        if isinstance(figure, Figure):
            figure.savefig(self, format='pdf', **kwargs)
        else:
            # *figure* is None or a figure number: resolve through Gcf.
            if figure is None:
                figureManager = Gcf.get_active()
            else:
                figureManager = Gcf.get_fig_manager(figure)
            if figureManager is None:
                raise ValueError("No such figure: " + repr(figure))
            else:
                figureManager.canvas.figure.savefig(self, format='pdf',
                                                    **kwargs)

    def get_pagecount(self):
        """
        Returns the current number of pages in the multipage pdf file.
        """
        return len(self._file.pageList)

    def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
        """
        Add a new text note to the page to be saved next. The optional
        positionRect specifies the position of the new note on the
        page. It is outside the page per default to make sure it is
        invisible on printouts.
        """
        self._file.newTextnote(text, positionRect)
class FigureCanvasPdf(FigureCanvasBase):
    """
    The canvas the figure renders into.  Calls the draw and print fig
    methods, creates the renderers, etc...

    Attributes
    ----------
    figure : `matplotlib.figure.Figure`
        A high-level Figure instance
    """
    # PDF output is always laid out at 72 points per inch.
    fixed_dpi = 72

    def draw(self):
        # All rendering happens in print_pdf; nothing to do on draw().
        pass

    filetypes = {'pdf': 'Portable Document Format'}

    def get_default_filetype(self):
        return 'pdf'

    def print_pdf(self, filename, **kwargs):
        """Render the figure as a PDF page into *filename*.

        *filename* may be a path-like destination or an open
        :class:`PdfPages`, in which case a page is appended and the
        underlying file is left open for more pages.
        """
        image_dpi = kwargs.get('dpi', 72)  # dpi to use for images
        self.figure.set_dpi(72)  # there are 72 pdf points to an inch
        width, height = self.figure.get_size_inches()
        if isinstance(filename, PdfPages):
            file = filename._file
        else:
            file = PdfFile(filename, metadata=kwargs.pop("metadata", None))
        try:
            file.newPage(width, height)
            _bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
            renderer = MixedModeRenderer(
                self.figure, width, height, image_dpi,
                RendererPdf(file, image_dpi, height, width),
                bbox_inches_restore=_bbox_inches_restore)
            self.figure.draw(renderer)
            renderer.finalize()
            file.finalize()
        finally:
            if isinstance(filename, PdfPages):  # finish off this page
                file.endStream()
            else:            # we opened the file above; now finish it off
                file.close()
class FigureManagerPdf(FigureManagerBase):
    """Non-interactive backend: the base manager behavior suffices."""
    pass
@_Backend.export
class _BackendPdf(_Backend):
    # Registers this backend's canvas/manager pair with matplotlib.
    FigureCanvas = FigureCanvasPdf
    FigureManager = FigureManagerPdf
| mit |
krafczyk/spack | var/spack/repos/builtin/packages/py-iminuit/package.py | 5 | 1801 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyIminuit(PythonPackage):
    """Interactive IPython-Friendly Minimizer based on SEAL Minuit2."""

    homepage = "https://pypi.python.org/pypi/iminuit"
    url = "https://pypi.io/packages/source/i/iminuit/iminuit-1.2.tar.gz"

    # Second argument is the md5 checksum of the source tarball.
    version('1.2', '4701ec472cae42015e26251703e6e984')

    # Required dependencies
    depends_on('py-setuptools', type='build')

    # Optional dependencies
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-cython', type='build')
| lgpl-2.1 |
stineb/sofun | get_sitelist_simsuite.py | 1 | 1708 | import pandas
import os
import os.path
##--------------------------------------------------------------------
## Simulation suite
## - "swissface"
## - "fluxnet"
## - "fluxnet2015"
## - "fluxnet_cnmodel"
## - "gcme"
## - "campi"
## - "campi_cmodel"
## - "fluxnet_fixalloc"
## - "atkin"
## - "atkinfull"
## - "olson"
## - "olson_cmodel"
##--------------------------------------------------------------------
## For global simulations, set simsuite to 'global'.
## This links NetCDF input files from directories mirrored locally from
## /work/bstocker/labprentice/data on Imperial's HPC CX1 server into the
## input directory structure required for SOFUN.
##--------------------------------------------------------------------
## For an example simulation (simulation name 'EXAMPLE_global'), set
## simsuite to 'example'. This should work after cloning this repo
## from github.
##--------------------------------------------------------------------
simsuite = 'fluxnet2015'

# Path of the per-site experiment table for the chosen simulation suite.
filnam_siteinfo_csv = ('../input_' + simsuite + '_sofun/experiments_'
                       + simsuite + '_sofun.csv')

# Load the site table when present. 'siteinfo' stays None otherwise so
# that the sitelist step below is skipped instead of crashing with a
# NameError (the original wrote sitelist.txt unconditionally, which
# failed for the 'example' suite and for a missing CSV alike).
siteinfo = None
if os.path.exists(filnam_siteinfo_csv):
    print('reading site information file ' + filnam_siteinfo_csv + ' ...')
    siteinfo = pandas.read_csv(filnam_siteinfo_csv)
elif simsuite == 'example':
    print('Executing single example simulation...')
else:
    print('site info file does not exist: ' + filnam_siteinfo_csv)

if siteinfo is not None:
    print('writing file sitelist.txt')
    # Context manager guarantees the file is closed even on write errors.
    with open('sitelist.txt', 'w') as fil:
        for index, row in siteinfo.iterrows():
            fil.write(row['expname'] + '\n')
| lgpl-2.1 |
MohamedAbdultawab/FOC_RiceUniv | algorithmic-thinking-1/module-2-project-and-application/02_application-2-analysis-of-a-computer-network/main.py | 1 | 13373 | #!/usr/bin python3.5
import random
import time
from itertools import chain
import matplotlib.pyplot as plt
from collections import deque
from copy import deepcopy
class Queue(object):
    """FIFO queue backed by :class:`collections.deque`.

    Values enter at the left end and leave from the right end.
    Iteration yields the stored values in ascending sorted order,
    not in queue order.
    """

    def __init__(self, arg=list()):
        self._queue = deque(arg)

    def __iter__(self):
        # Sorted-value iteration, matching the original wrapper.
        return iter(sorted(self._queue))

    def __len__(self):
        return len(self._queue)

    def __str__(self):
        return str(self._queue)

    def enqueue(self, value):
        """Add *value* at the tail of the queue."""
        self._queue.appendleft(value)

    def dequeue(self):
        """Remove and return the oldest stored value."""
        return self._queue.pop()

    def is_empty(self):
        """Return True when no values are stored, False otherwise."""
        return len(self._queue) == 0

    def clear(self):
        """Discard every stored value."""
        self._queue.clear()
class UPATrial:
    """Helper for the UPA (preferential attachment) graph algorithm.

    Keeps a flat list of node numbers in which each node appears in
    proportion to its desired selection probability, so that uniform
    sampling from the list realizes preferential attachment.
    """

    def __init__(self, num_nodes):
        """Initialize for a complete graph on *num_nodes* nodes; every
        node starts with *num_nodes* copies in the sampling list."""
        self._num_nodes = num_nodes
        self._node_numbers = [node
                              for node in range(num_nodes)
                              for _ in range(num_nodes)]

    def run_trial(self, num_nodes):
        """Sample *num_nodes* neighbors for a newly added node and
        update the sampling list to keep the ratios correct.

        Returns the set of sampled neighbor nodes.
        """
        # Draw with replacement; duplicates collapse in the set.
        neighbors = set()
        for _ in range(num_nodes):
            neighbors.add(random.choice(self._node_numbers))
        # The new node gets one entry for itself plus one per edge
        # gained; each chosen neighbor gains one extra entry as well.
        self._node_numbers.extend(
            [self._num_nodes] * (1 + len(neighbors)))
        self._node_numbers.extend(list(neighbors))
        self._num_nodes += 1
        return neighbors
def timeit(func, *args, **kwargs):
    """Call ``func(*args, **kwargs)`` and return the elapsed wall-clock
    time in seconds; the function's own return value is discarded."""
    started_at = time.time()
    func(*args, **kwargs)
    finished_at = time.time()
    return finished_at - started_at
def make_graph(nodes, edges):
    """Build an adjacency-set graph.

    *nodes* is an iterable of node labels and *edges* an iterable of
    (tail, head) tuples. Returns a dict mapping each node to the set
    of heads of its outgoing edges.
    """
    graph = {node: set() for node in nodes}
    for tail, head in edges:
        graph[tail].add(head)
    return graph
def remove_node(graph, node):
    """Remove *node* and every edge touching it from the undirected
    adjacency-set *graph* (modifies *graph* in place)."""
    incident = graph[node]
    for neighbor in incident:
        graph[neighbor].remove(node)
    del graph[node]
def make_complete_graph(num_nodes):
    """Return a complete undirected graph on ``num_nodes`` nodes.

    The graph is a dict mapping each node ``0..num_nodes-1`` to the set
    of all other nodes; for ``num_nodes <= 0`` the graph is empty.
    """
    # Build the adjacency sets directly instead of materializing the
    # O(n^2) edge-tuple list and re-parsing it through make_graph.
    return {node: set(range(num_nodes)) - {node}
            for node in range(num_nodes)}
def make_er(num_nodes, probability):
    """Return an undirected Erdos-Renyi G(n, p) random graph.

    Each of the C(n, 2) unordered node pairs independently becomes an
    edge with the given *probability*; each edge is stored in both
    directions of the adjacency-set dict.
    """
    graph = {node: set() for node in range(num_nodes)}
    # Visit each unordered pair exactly once: the original iterated all
    # ordered pairs, sampling every pair twice and inflating the
    # effective edge probability to 1 - (1 - p)^2.
    for node_a in range(num_nodes):
        for node_b in range(node_a + 1, num_nodes):
            if random.random() < probability:
                graph[node_a].add(node_b)
                graph[node_b].add(node_a)
    return graph
def make_upa(num_edges, num_nodes):
    """Grow an undirected preferential-attachment (UPA) graph.

    Starts from a complete graph on *num_edges* nodes, then adds the
    remaining nodes one at a time, each wired to existing nodes chosen
    preferentially by degree via a UPATrial sampler. Returns an
    adjacency-set dict with *num_nodes* nodes.
    """
    graph = make_complete_graph(num_edges)
    sampler = UPATrial(num_edges)
    for new_node in range(num_edges, num_nodes):
        chosen = sampler.run_trial(num_edges)
        graph[new_node] = chosen
        # Mirror the new edges so the graph stays undirected.
        for existing in chosen:
            graph[existing].add(new_node)
    return graph
def load_graph_data(file_name):
    """Load an undirected graph from a text representation.

    Each line lists a node followed by its neighbors, space-separated,
    with a trailing space before the newline. Returns a (nodes, edges)
    pair where *edges* contains both orientations of every adjacency.
    """
    # Context manager guarantees the handle is closed even if the read
    # raises (the original closed it only on the success path).
    with open(file_name) as graph_file:
        graph_lines = graph_file.read().split('\n')
    graph_lines = graph_lines[:-1]  # drop the empty entry after the final newline
    print("Loaded graph with", len(graph_lines), "nodes")
    nodes = []
    edges = []
    for line in graph_lines:
        fields = line.split(' ')
        node = int(fields[0])
        nodes.append(node)
        # fields[-1] is the empty string produced by the trailing space.
        for neighbor_str in fields[1:-1]:
            neighbor = int(neighbor_str)
            edges.append((node, neighbor))
            edges.append((neighbor, node))
    return nodes, edges
def bfs_visited(ugraph, start_node):
    """Breadth-first search over the undirected graph *ugraph*.

    Returns the set of all nodes reachable from *start_node*
    (including *start_node* itself).
    """
    # A plain deque provides the FIFO behavior the custom Queue wrapper
    # supplied, without the per-operation indirection.
    visited = {start_node}
    frontier = deque([start_node])
    while frontier:
        node = frontier.popleft()
        for neighbor in ugraph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                frontier.append(neighbor)
    return visited
def cc_visited(ugraph):
    """Compute the connected components of undirected graph *ugraph*.

    Returns a list of sets, one set of nodes per connected component.
    """
    remaining_nodes = set(ugraph.keys())
    connected_components = []
    while remaining_nodes:
        # Any start node yields its whole component, so an arbitrary
        # element is enough; the original's random.choice over a freshly
        # built list added O(n) work per component for no benefit.
        start = next(iter(remaining_nodes))
        component = bfs_visited(ugraph, start)
        connected_components.append(component)
        remaining_nodes -= component
    return connected_components
def largest_cc_size(ugraph):
    """Return the size (an integer) of the largest connected component
    of *ugraph*, or 0 for an empty graph."""
    # max over the sizes is O(k); the original sorted every component
    # (O(k log k)) only to read the first element.
    return max((len(component) for component in cc_visited(ugraph)),
               default=0)
def compute_resilience(ugraph, attack_order):
    """Measure how the largest connected component shrinks under attack.

    For each node in *attack_order*, the node and its edges are removed
    from a working copy of *ugraph*. Entry k of the returned list is the
    size of the largest connected component after the first k removals;
    entry 0 describes the intact graph. The input graph is not modified.
    """
    working_graph = deepcopy(ugraph)
    resilience = [largest_cc_size(working_graph)]
    for target in attack_order:
        remove_node(working_graph, target)
        resilience.append(largest_cc_size(working_graph))
    return resilience
def random_order(graph):
    """Return the nodes of *graph* in a uniformly random order."""
    attack_order = list(graph)
    random.shuffle(attack_order)
    return attack_order
def fast_targeted_order(graph):
    """Compute a targeted attack order (highest-degree node first).

    Nodes are bucketed by current degree; repeatedly taking a node from
    the highest non-empty bucket and demoting its neighbors one bucket
    gives O(n + m) total work, avoiding the full rescan per removal of
    targeted_order.

    NOTE(review): assumes an undirected graph without self-loops -- a
    node present in its own adjacency set would corrupt the buckets.
    """
    graph = deepcopy(graph)  # leave the caller's graph untouched
    graph_length = len(graph)
    # degree_sets[d] holds exactly the nodes whose current degree is d.
    degree_sets = [set([]) for degree in range(graph_length)]
    for i in graph.keys():
        d = len(graph[i])
        degree_sets[d].add(i)
    order = []
    for k in range(len(graph) - 1, -1, -1):
        while degree_sets[k]:
            node = degree_sets[k].pop()
            # Removing *node* lowers each neighbor's degree by one; move
            # each neighbor down a bucket before deleting the node.
            for neighbor in graph[node]:
                d = len(graph[neighbor])
                degree_sets[d].remove(neighbor)
                degree_sets[d - 1].add(neighbor)
            order.append(node)
            remove_node(graph, node)
    return order
def targeted_order(graph):
    """
    Compute a targeted attack order consisting
    of nodes of maximal degree
    Returns:
    A list of nodes
    """
    # Operate on a copy so the caller's adjacency dict survives intact.
    working = deepcopy(graph)
    attack_sequence = []
    while working:
        # max() keeps the first node seen among ties, which matches the
        # strict '>' scan the naive loop would perform.
        target = max(working, key=lambda node: len(working[node]))
        # pop() returns the target's neighbor list; detach it everywhere.
        for adjacent in working.pop(target):
            working[adjacent].remove(target)
        attack_sequence.append(target)
    return attack_sequence
##################################################
# Application 2 questions
def Q1():
    """Plot resilience under random-order attack for three comparable graphs.

    Loads the computer-network graph from file, builds size-matched ER and
    UPA random graphs, attacks each in a random node order, and saves the
    three resilience curves to 'Q1.png'.
    """
    # Generating graphs
    nodes, edges = load_graph_data('alg_rf7.txt')
    num_nodes = len(nodes)
    # Computer Network graph
    comp_net_graph = make_graph(nodes, edges)
    # Erdos and Renyi graph
    # NOTE(review): p = .002 here but the legend below says 'P = .02' --
    # one of the two is wrong; confirm the intended edge probability.
    er_graph = make_er(num_nodes, .002)
    # Preferential Attachment graph
    pa_graph = make_upa(3, num_nodes)
    comp_attack_order = random_order(comp_net_graph)
    er_attack_order = random_order(er_graph)
    pa_attack_order = random_order(pa_graph)
    comp_resilience = compute_resilience(comp_net_graph, comp_attack_order)
    er_resilience = compute_resilience(er_graph, er_attack_order)
    pa_resilience = compute_resilience(pa_graph, pa_attack_order)
    plt.figure(figsize=(7, 7), dpi=300)
    plt.plot(comp_resilience, color='blue', label='Computer Network')
    plt.plot(er_resilience, color='green', label='ER random graph')
    plt.plot(pa_resilience, color='red', label='UPA graph')
    plt.title('Resilience of different graphs',
              fontsize=18,
              color='#ff8800')
    plt.xlabel('Number of nodes removed',
               fontsize=14,
               color='#ff8800')
    plt.ylabel('Size of the largest connected component',
               fontsize=14,
               color='#ff8800')
    plt.legend(loc='best', labels=['Computer Network',
                                   'ER random graph, P = .02',
                                   'UPA graph, M = 3'])
    # plt.show()
    plt.savefig('Q1', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
    # print(len(comp_net_graph), sum([len(x) for x in comp_net_graph.values()]) // 2, largest_cc_size(comp_net_graph))
    # print(len(er_graph), sum([len(x) for x in er_graph.values()]) // 2, largest_cc_size(er_graph))
    # print(len(pa_graph), sum([len(x) for x in pa_graph.values()]) // 2, largest_cc_size(pa_graph))
def Q3():
    """
    Benchmark the two attack-order implementations on growing UPA graphs:
    fast_targeted_order: fast
    targeted_order: slow
    """
    graph_lengths = range(10, 1000, 10)
    graphs = [make_upa(5, x) for x in graph_lengths]
    # NOTE(review): 'timeit' here must be a project helper with signature
    # (func, arg) -- it is not the stdlib timeit module; confirm the import.
    fast_times = [timeit(fast_targeted_order, graph) for graph in graphs]
    slow_times = [timeit(targeted_order, graph) for graph in graphs]
    # Plotting
    plt.plot(graph_lengths, fast_times, color='b', label='fast_targeted_order')
    plt.plot(graph_lengths, slow_times, color='g', label='targeted_order')
    plt.title('Regular and fast targeted order - Desktop',
              fontsize=18,
              color='#ff8800')
    plt.xlabel('Size of graph, with M = 5',
               fontsize=14,
               color='#ff8800')
    plt.ylabel('Time in seconds',
               fontsize=14,
               color='#ff8800')
    plt.legend(loc='best', labels=['fast_targeted_order',
                                   'targeted_order'])
    plt.show()
    # plt.savefig('Q3', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
def Q4():
    """Plot resilience of the three graphs under a targeted (highest-degree)
    attack computed with fast_targeted_order, and save to 'Q4.png'.
    """
    # Generating graphs
    nodes, edges = load_graph_data('alg_rf7.txt')
    num_nodes = len(nodes)
    # Computer Network graph
    comp_net_graph = make_graph(nodes, edges)
    # Erdos and Renyi graph
    # NOTE(review): p = .002 here vs 'P = .02' in the legend -- confirm which
    # probability is intended.
    er_graph = make_er(num_nodes, .002)
    # Preferential Attachment graph
    pa_graph = make_upa(3, num_nodes)
    comp_attack_order = fast_targeted_order(comp_net_graph)
    er_attack_order = fast_targeted_order(er_graph)
    pa_attack_order = fast_targeted_order(pa_graph)
    comp_resilience = compute_resilience(comp_net_graph, comp_attack_order)
    er_resilience = compute_resilience(er_graph, er_attack_order)
    pa_resilience = compute_resilience(pa_graph, pa_attack_order)
    # Plotting
    plt.plot(comp_resilience, color='blue', label='Computer Network')
    plt.plot(er_resilience, color='green', label='ER random graph')
    plt.plot(pa_resilience, color='red', label='UPA graph')
    # BUG FIX: corrected the user-facing typo 'tageted' -> 'targeted'.
    plt.title('Resilience of different graphs under targeted attacks\nusing fast_targeted_order',
              fontsize=18,
              color='#ff8800')
    plt.xlabel('Number of nodes removed',
               fontsize=14,
               color='#ff8800')
    plt.ylabel('Size of the largest connected component',
               fontsize=14,
               color='#ff8800')
    plt.legend(loc='best', labels=['Computer Network',
                                   'ER random graph, P = .02',
                                   'UPA graph, M = 3'])
    # plt.show()
    plt.savefig('Q4', dpi=300, format='png', transparent=False, orientation='landscape', bbox_inches='tight', pad_inches=0.3)
# print(timeit(Q1))
| gpl-3.0 |
grehx/spark-tk | regression-tests/sparktkregtests/testcases/graph/graph_connected_test.py | 1 | 2428 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test connected_components graphx, Valuesare checked against networkx"""
import unittest
from sparktkregtests.lib import sparktk_test
class ConnectedComponents(sparktk_test.SparkTKTestCase):
    """Exercises graphx connected components against a known clique dataset."""

    def test_connected_component(self):
        """ Tests the graphx connected components in ATK"""
        # NOTE(review): setUp() is invoked manually here instead of being
        # overridden and called by the unittest framework -- confirm this is
        # intentional.
        super(ConnectedComponents, self).setUp()
        graph_data = self.get_file("clique_10.csv")
        schema = [('src', str),
                  ('dst', str)]
        # set up the vertex frame, which is the union of the src and
        # the dst columns of the edges
        self.frame = self.context.frame.import_csv(graph_data, schema=schema)
        self.vertices = self.frame.copy()
        self.vertices2 = self.frame.copy()
        self.vertices.rename_columns({"src": "id"})
        self.vertices.drop_columns(["dst"])
        self.vertices2.rename_columns({"dst": "id"})
        self.vertices2.drop_columns(["src"])
        # Merge both endpoint columns and deduplicate to get the vertex set.
        self.vertices.append(self.vertices2)
        self.vertices.drop_duplicates()
        self.vertices.sort("id")
        self.frame.add_columns(lambda x: 2, ("value", int))
        self.graph = self.context.graph.create(self.vertices, self.frame)
        components = self.graph.connected_components()
        components.sort('id')
        # Vertex ids look like '<component>_<element>'; extract the element.
        components.add_columns(
            lambda x: x['id'].split('_')[1], ("element", str))
        frame = components.to_pandas(components.count())
        group = frame.groupby('component').agg(lambda x: x.nunique())
        # Each component should only have 1 element value, the name of the
        # component
        for _, row in group.iterrows():
            self.assertEqual(row['element'], 1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mblondel/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
# Time scikit-learn's Ward agglomerative clustering against SciPy's
# hierarchy.ward over a grid of (n_samples, n_features) sizes, then plot
# the log runtime ratio.
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')

n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
                                    n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)

for i, n in enumerate(n_samples):
    for j, p in enumerate(n_features):
        # BUG FIX: logspace yields floats; modern NumPy rejects float shape
        # arguments, so convert explicitly (truncation matches old behavior).
        X = np.random.normal(size=(int(n), int(p)))
        t0 = time.time()
        ward.fit(X)
        scikits_time[j, i] = time.time() - t0
        t0 = time.time()
        hierarchy.ward(X)
        scipy_time[j, i] = time.time() - t0

# Ratio > 1 means scikit-learn is slower than SciPy for that grid cell.
ratio = scikits_time / scipy_time

pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
# BUG FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
pl.yticks(range(len(n_features)), n_features.astype(int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
B0BBB/ProxyVotingSim | Main.py | 1 | 6988 | from collections import defaultdict
from random import sample
from time import time
import csv
import matplotlib.pyplot as plt
import numpy as np
from sympy.printing.pretty.pretty_symbology import pretty_symbol
# Library located at https://pypi.python.org/pypi/Distance/
from Simulations import create_mel_dist, create_f_pop, create_ballots, reset_active_agents, \
get_proxy_ranks
from utils import borda_voting_rule
from config import *
def main():
    """Run the proxy-voting simulation sweep (Python 2 script).

    For every active-subset size n in [2, N], runs `Runs` simulations per
    voting scenario (B/P/V/E), averages the distance of each Borda outcome
    from the ground truth, plots error and proxy-weight curves, and dumps
    the generated population to 'some.csv'.  All upper-case parameters
    (PHI, A, N, Runs, PopSize, Truth, Vnearest, WN, Scenarios, Mel,
    distance) come from the star-imported config module.
    """
    print 'All set, the parameters are:', pretty_symbol('phi'), '=', PHI, 'A =', A, 'N =', N, 'Iterations =', Runs
    print 'Population Size =', PopSize, 'Truth =', Truth, 'Vnearest =', Vnearest
    print 'Weights will be calculated for', WN, 'Agents \n'
    print 'Creating Mellow\'s Distribution for the Data Set \n'
    distance_table = defaultdict(list)
    weight_table = defaultdict(list)
    counter = 1
    create_mel_dist(Truth, PHI, distance)
    print 'Creating Population \n'
    data = create_f_pop(PopSize, Mel)
    print 'Running Simulations \n'
    for n in range(2, N + 1):
        stime = time()
        for run in range(Runs):
            active_agents = sample(data, n)
            for scenario in Scenarios:
                # If number of agents smaller than Vnearest append 0 and continue
                if scenario.upper() == 'V':
                    if n < Vnearest:
                        distance_table[(n, scenario)] += []
                        continue
                reset_active_agents(data, scenario)
                ballots = create_ballots(data, active_agents, scenario)
                result = borda_voting_rule(ballots, A)
                distance_table[(n, scenario)] += [distance(result, Truth)]
                if scenario.upper() == 'P' and n == WN:
                    # currently saves the weight, the key is the number of the simulation
                    weight_table[('P', counter)] += get_proxy_ranks(active_agents)
                elif scenario.upper() == 'V' and n == WN:
                    weight_table[('V', counter)] = get_proxy_ranks(active_agents)
        counter += 1
        if (time() - stime) < 60:
            print 'Simulation number', counter, 'Number of active agents:', n, 'Exec Time:', int(time() - stime), 'Sec '
        else:
            print 'Simulation number', counter, 'Number of active agents:', n, 'Exec Time:', (
                time() - stime) / 60, 'Min '
    dist_table = {}
    weight_table_p = [0] * WN
    weight_table_v = [0] * WN
    # Creates the table dict, which contains the average distances from all the Runs (simulations),
    # if it's an empty list will add 'None' - for the V scenario
    for n, s in distance_table:
        if distance_table[(n, s)]:
            dist_table[(n, s)] = np.average(distance_table[(n, s)])
        else:
            dist_table[(n, s)] = None
    # Accumulate then average the per-rank proxy weights over all runs.
    for i in range(WN):
        for k in weight_table:
            if k[0] == 'P':
                weight_table_p[i] += weight_table[k][i]
            else:
                weight_table_v[i] += weight_table[k][i]
    for i in range(WN):
        weight_table_v[i] = weight_table_v[i] / float(Runs)
        weight_table_p[i] = weight_table_p[i] / float(Runs)
    # Creating an empty list for each scenario
    Blist, Plist, Vlist, Elist = [], [], [], []
    # Create list with the average distances for each scenario
    for i in range(2, N + 1):
        for scenario in Scenarios:
            if scenario.upper() == 'B':
                Blist.append(dist_table[(i, scenario)])
            elif scenario.upper() == 'P':
                Plist.append(dist_table[(i, scenario)])
            elif scenario.upper() == 'V':
                Vlist.append(dist_table[(i, scenario)])
            elif scenario.upper() == 'E':
                Elist.append(dist_table[(i, scenario)])
    index_a = np.arange(2, N + 1)
    # Define the current figure, all functions/commands will apply to the current figure
    plt.figure(1)
    avg_errors = []
    # Each plot function is a specific line (scenario)
    avg_errors.extend(plt.plot(index_a, Blist, color='b', linestyle='--', marker='o', markerfacecolor='b', label='B'))
    avg_errors.extend(plt.plot(index_a, Plist, color='m', linestyle='-.', marker='D', markerfacecolor='m', label='P'))
    avg_errors.extend(plt.plot(index_a, Vlist, color='c', linestyle=':', marker='p', markerfacecolor='c', label='V'))
    avg_errors.extend(plt.plot(index_a, Elist, color='g', linestyle='-', marker='s', markerfacecolor='g', label='E'))
    plt.setp(avg_errors, linewidth=2, markersize=5)
    plt.xlabel('Number of Active Subset')
    plt.ylabel('Avg Dist from T')
    plt.title(
        'Distance from the Truth with ' + str(Runs) + ' simulations \nPopulation size: ' + str(PopSize) + pretty_symbol(
            'phi') + str(PHI) + ' A=' + str(A))
    plt.xticks(index_a, range(2, N + 1))
    # Legend Box appearance
    plt.legend(shadow=True, fancybox=True)
    # Auto layout design function
    plt.tight_layout()
    # # Generates a unique image name
    # figname = '.\Plots\Error'
    # figname += 'A=' + str(A) + 'PHI=' + str(PHI) + 'V=' + str(Vnearest)
    # figname += '-' + str(time())
    # figname += '.png'
    # # Saves the generated figures
    # plt.savefig(figname, dpi=200)
    # # Closes the current figure
    # plt.close()
    # set a new current figure
    plt.figure(2)
    # Plotting the weights graphs
    index_b = np.arange(1, WN + 1)
    avg_weights = []
    avg_weights.extend(
        plt.plot(index_b, weight_table_p, color='y', linestyle='-', marker='s', markerfacecolor='y', label='Proxy'))
    avg_weights.extend(
        plt.plot(index_b, weight_table_v, color='r', linestyle='-', marker='s', markerfacecolor='r',
                 label='Virtual Proxy'))
    plt.setp(avg_weights, linewidth=2, markersize=5)
    plt.xlabel('The rank of the Active agent')
    plt.ylabel('Average Weight')
    plt.title('The average weights of the proxies ' + str(Runs) + ' simulations \nPopulation size: ' + str(PopSize))
    plt.xticks(index_b, range(1, WN + 1))
    # Legend Box appearance
    plt.legend(shadow=True, fancybox=True)
    # Auto layout design function
    plt.tight_layout()
    # # Generates a unique image name
    # figname = '.\Plots\Weights'
    # figname += 'A= ' + str(A) + ' PHI= ' + str(PHI) + ' WN= ' + str(WN)
    # figname += ' -' + str(time())
    # figname += '.png'
    # # Saves the generated figures
    # plt.savefig(figname, dpi=150)
    # # Closes the current figure
    # plt.close()
    # The rendering function - shows the output on the screen
    plt.show()
    # Record the population in a CSV file
    with open('some.csv', 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(['Agents Location'])
        for i in data:
            x = tuple(['D=', distance(Truth, i.location)])
            writer.writerow(i.location + x)
if __name__ == '__main__':
main()
| mit |
dandanvidi/in-vivo-enzyme-kinetics | scripts/pFVA.py | 3 | 1834 | import pandas as pd
from cobra.core import Metabolite, Reaction
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible, revert_to_reversible
from cobra.flux_analysis.variability import flux_variability_analysis
# Parsimonious flux variability analysis (Python 2 script): for every growth
# condition, pin the measured growth rate, open the condition's carbon
# source, and record each reaction's min/max flux at a near-optimal total.
gc = pd.DataFrame.from_csv('../data/growth_conditions.csv')
gc = gc[gc.media_key>0]
m = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
convert_to_irreversible(m)
# Add a fake metabolite produced by every reaction and consumed only by a
# dedicated 'flux_counter' reaction; using that counter as the objective
# makes the solver account for total flux (pFBA-style parsimony).
fake = Metabolite(id='fake')
m.add_metabolites(fake)
for r in m.reactions:
    r.add_metabolites({fake:1})
flux_counter = Reaction(name='flux_counter')
flux_counter.add_metabolites(metabolites={fake:-1})
m.add_reaction(flux_counter)
m.change_objective(flux_counter)
# Forbid glucose secretion through the reversed exchange reaction.
m.reactions.get_by_id('EX_glc_e_reverse').upper_bound = 0
rxns = {r.id:r for r in m.reactions}
# One (condition, 'maximum'/'minimum') row per reaction flux.
index = pd.MultiIndex.from_product([gc.index, ['maximum', 'minimum']])
fluxes = pd.DataFrame(index=index, columns=rxns.keys())
for i,c in enumerate(gc.index):
    # Open uptake of this condition's carbon source and fix the growth rate.
    rxns['EX_'+gc['media_key'][c]+'_e'].lower_bound = -1000
    rxns['Ec_biomass_iJO1366_WT_53p95M'].upper_bound = gc['growth rate [h-1]'][c]
    rxns['Ec_biomass_iJO1366_WT_53p95M'].lower_bound = gc['growth rate [h-1]'][c]
    for j, r in enumerate(m.reactions):
        # FVA for one reaction at a time, within 0.01% of the optimum.
        fva_results = flux_variability_analysis(m,reaction_list=[r],
                objective_sense='minimize',fraction_of_optimum=1.0001)
        fva = pd.DataFrame.from_dict(fva_results)
        fluxes[r.id][c,'maximum'] = fva.loc['maximum'][0]
        fluxes[r.id][c,'minimum'] = fva.loc['minimum'][0]
        print c, i, j, r
    # Close the carbon source again before moving to the next condition.
    rxns['EX_'+gc['media_key'][c]+'_e'].lower_bound = 0
fluxes.dropna(how='all', inplace=True)
fluxes.T.to_csv('../data/flux_variability_[mmol_gCDW_h]_01%.csv')
| mit |
lbishal/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
    """Naive O(N^3) Floyd-Warshall reference used to validate fast variants.

    Mutates and returns ``graph``: zero entries are treated as missing edges,
    and unreachable pairs come back as 0 in the result.
    """
    n_vertices = graph.shape[0]
    # set nonzero entries to infinity
    graph[graph == 0] = np.inf
    # set diagonal to zero
    graph.flat[::n_vertices + 1] = 0
    if not directed:
        graph = np.minimum(graph, graph.T)

    for mid in range(n_vertices):
        for src in range(n_vertices):
            for dst in range(n_vertices):
                via = graph[src, mid] + graph[mid, dst]
                if via < graph[src, dst]:
                    graph[src, dst] = via

    # unreachable pairs are reported as 0
    graph[np.isinf(graph)] = 0

    return graph
def generate_graph(N=20):
    """Build a deterministic sparse N x N random distance matrix (seed 0)."""
    # sparse grid of distances
    rng = np.random.RandomState(0)
    dist_matrix = rng.random_sample((N, N))

    # make symmetric: distances are not direction-dependent
    dist_matrix = dist_matrix + dist_matrix.T

    # make graph sparse by zeroing roughly half of the entries
    half = N * N // 2
    rows = rng.randint(N, size=half)
    cols = rng.randint(N, size=half)
    dist_matrix[rows, cols] = 0

    # set diagonal to zero
    dist_matrix.flat[::N + 1] = 0

    return dist_matrix
def test_floyd_warshall():
    """graph_shortest_path(method='FW') must match the naive reference."""
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
    """graph_shortest_path(method='D') must match the naive reference."""
    dist_matrix = generate_graph(20)

    for directed in (True, False):
        graph_D = graph_shortest_path(dist_matrix, directed, 'D')
        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
        assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
    """single_source_shortest_path_length must agree with the reference
    on hop counts (all edge weights forced to 1)."""
    dist_matrix = generate_graph(20)
    # We compare path length and not costs (-> set distances to 0 or 1)
    dist_matrix[dist_matrix != 0] = 1

    for directed in (True, False):
        if not directed:
            dist_matrix = np.minimum(dist_matrix, dist_matrix.T)

        graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)

        for i in range(dist_matrix.shape[0]):
            # Non-reachable nodes have distance 0 in graph_py
            dist_dict = defaultdict(int)
            dist_dict.update(single_source_shortest_path_length(dist_matrix,
                                                                i))

            for j in range(graph_py[i].shape[0]):
                assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
    """Regression test: 'D' and 'FW' must agree on this specific matrix."""
    X = np.array([[0., 0., 4.],
                  [1., 0., 2.],
                  [0., 5., 0.]])
    dist_FW = graph_shortest_path(X, directed=False, method='FW')
    dist_D = graph_shortest_path(X, directed=False, method='D')
    assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
amozie/amozie | stockzie/strategy/LinearModelStrategy.py | 1 | 1778 | # -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tushare as ts
import stockzie as sz
import talib as tl
from stockzie.strategy import BaseStrategy
import datazie as dz
class LinearModelStrategy(BaseStrategy):
    """Trading strategy that fits a rolling linear model to closing prices
    and records the prediction plus its confidence band for plotting.

    NOTE(review): ``self.__sofar_data`` and ``self.__plot_dicts`` are
    name-mangled here to ``_LinearModelStrategy__*``; if BaseStrategy stores
    them as its own double-underscore attributes these lookups will fail --
    confirm the attribute names against BaseStrategy.
    """

    def __init__(self, data, cash=100000):
        super().__init__(data, cash)

    def _init_trading(self):
        # Per-bar series accumulated during the run for later plotting.
        self.close = []
        self.close_prd = []        # model prediction
        self.close_std_up = []     # upper confidence bound
        self.close_std_down = []   # lower confidence bound
        self.close_std = []        # std estimate of the prediction

    def _handle_trading(self):
        # Fit over a 20-bar rolling window once enough history exists.
        N = 20
        if self._iter_i < N:
            lm = None
        else:
            lm = dz.model.LinearModel(self.__sofar_data.tail(N).close.values)
            lm.fit()
        if lm is None:
            # Not enough data yet: pad every series so lengths stay aligned.
            self.close_prd.append(None)
            self.close_std_down.append(None)
            self.close_std_up.append(None)
            self.close_std.append(None)
        else:
            # prd[0] layout inferred from usage: (prediction, std, lower,
            # upper) at 90% confidence -- NOTE(review): verify against
            # datazie's LinearModel.predict.
            prd = lm.predict(N, alpha=0.1)
            self.close_prd.append(prd[0][0])
            self.close_std_down.append(prd[0][2])
            self.close_std_up.append(prd[0][3])
            self.close_std.append(prd[0][1])
        self.close.append(self._iter_datas[0].close)

    def _end_trading(self):
        # Register the collected series on the plot dictionaries: panel 0
        # overlays the price, panel 1 shows the std series.
        self.__plot_dicts[0]['close'] = self.close
        self.__plot_dicts[0]['close_prd'] = self.close_prd
        self.__plot_dicts[0]['close_std_down'] = self.close_std_down
        self.__plot_dicts[0]['close_std_up'] = self.close_std_up
        self.__plot_dicts[1]['close_std'] = self.close_std
if __name__ == '__main__':
data = sz.data.get('600056', ktype='60')
st = LinearModelStrategy(data.tail(220))
st.run()
st.plot_demo(2)
| apache-2.0 |
ChristianSch/skml | doc/auto_examples/example_br.py | 3 | 1416 | """
=================================
Ensemble Binary Relevance Example
=================================
An example of :class:`skml.problem_transformation.BinaryRelevance`
"""
from __future__ import print_function
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import numpy as np
from skml.problem_transformation import BinaryRelevance
from skml.datasets import load_dataset
# Train a binary-relevance (one classifier per label) wrapper around
# logistic regression on the yeast multi-label dataset, then report the
# standard multi-label metrics in micro and macro averaging.
X, y = load_dataset('yeast')
X_train, X_test, y_train, y_test = train_test_split(X, y)

clf = BinaryRelevance(LogisticRegression())
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

print("hamming loss: ")
print(hamming_loss(y_test, y_pred))

print("accuracy:")
print(accuracy_score(y_test, y_pred))

print("f1 score:")
print("micro")
print(f1_score(y_test, y_pred, average='micro'))
print("macro")
print(f1_score(y_test, y_pred, average='macro'))

print("precision:")
print("micro")
print(precision_score(y_test, y_pred, average='micro'))
print("macro")
print(precision_score(y_test, y_pred, average='macro'))

print("recall:")
print("micro")
print(recall_score(y_test, y_pred, average='micro'))
print("macro")
print(recall_score(y_test, y_pred, average='macro'))
| mit |
marqh/cartopy | lib/cartopy/tests/mpl/test_caching.py | 1 | 6536 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import gc
import numpy as np
from matplotlib.testing.decorators import image_comparison as mpl_image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
from matplotlib.path import Path
import shapely.geometry
import cartopy.crs as ccrs
import cartopy.mpl_integration.patch as cpatch
import cartopy.io.shapereader
import cartopy.mpl_integration.geoaxes as cgeoaxes
from cartopy.examples.waves import sample_data
import cartopy.mpl_integration.patch
from cartopy.tests.mpl import image_comparison
class CallCounter(object):
    """
    Context manager that counts calls to ``parent.function_name``.

    While active, the target function is swapped for a wrapper that bumps
    ``self.count`` and then delegates to the original; on exit the original
    function is restored. (Useful for cache checking!)

    Example usage::

        show_counter = CallCounter(plt, 'show')
        with show_counter:
            plt.show()
            plt.show()
            plt.show()
        print(show_counter.count)  # <--- outputs 3

    """
    def __init__(self, parent, function_name):
        self.count = 0
        self.parent = parent
        self.function_name = function_name
        self.orig_fn = getattr(parent, function_name)

    def __enter__(self):
        counter = self

        def counting_wrapper(*args, **kwargs):
            counter.count += 1
            return counter.orig_fn(*args, **kwargs)

        setattr(self.parent, self.function_name, counting_wrapper)

    def __exit__(self, exc_type, exc_val, exc_tb):
        setattr(self.parent, self.function_name, self.orig_fn)
def test_coastline_loading_cache():
    """Drawing coastlines on two different axes must build only one Reader."""
    # a5caae040ee11e72a62a53100fe5edc355304419 added coastline caching.
    # this test ensures it is working...

    # count the number of times shapereader.Reader is created.
    shapereader_counter = CallCounter(cartopy.io.shapereader.Reader, '__init__')
    with shapereader_counter:
        ax1 = plt.subplot(2, 1, 1, projection=ccrs.PlateCarree())
        ax1.coastlines()
        ax2 = plt.subplot(2, 1, 1, projection=ccrs.Robinson())
        ax2.coastlines()

    assert shapereader_counter.count == 1, ('The shapereader Reader class was created '
                                            'more than (actually %s times) - '
                                            ' the caching is not working.' %
                                            shapereader_counter.count)
    plt.close()
def test_shapefile_transform_cache():
    """Repeated add_geometries calls must project each geometry only once."""
    # a5caae040ee11e72a62a53100fe5edc355304419 added shapefile mpl geometry caching
    # based on geometry object id. This test ensures it is working...

    coastline_path = cartopy.io.shapereader.natural_earth(resolution="50m",
                                                          category='physical',
                                                          name='coastline')
    geoms = tuple(cartopy.io.shapereader.Reader(coastline_path).geometries())[:10]
    n_geom = len(geoms)

    ax = plt.axes(projection=ccrs.Robinson())
    project_geometry_counter = CallCounter(ax.projection, 'project_geometry')

    # Capture the size of the cache before our test
    gc.collect()
    initial_cache_size = len(cgeoaxes._GEOMETRY_TO_PATH_CACHE)

    with project_geometry_counter:
        c = ax.add_geometries(geoms, ccrs.Geodetic())
        c = ax.add_geometries(geoms, ccrs.Geodetic())
        c = ax.add_geometries(geoms[:], ccrs.Geodetic())

    # Before the performance enhancement, the count would have been
    # n_calls * n_geom, but should now be just n_geom.
    assert project_geometry_counter.count == n_geom, ('The given geometry was transformed '
                                                      'too many times (expected: %s; got %s) - '
                                                      ' the caching is not working.' %
                                                      (n_geom, project_geometry_counter.count))

    # Check the cache has an entry for each geometry.
    assert len(cgeoaxes._GEOMETRY_TO_PATH_CACHE) == initial_cache_size + n_geom

    # Check that the cache is empty again once we've dropped all references
    # to the source paths.
    plt.clf()
    del geoms
    gc.collect()
    assert len(cgeoaxes._GEOMETRY_TO_PATH_CACHE) == initial_cache_size

    plt.close()
def test_contourf_transform_path_counting():
    """Filled-contour paths must be converted to geometries exactly once."""
    ax = plt.axes(projection=ccrs.Robinson())
    plt.draw()

    # Count the calls to the path->geos conversion during a redraw.
    path_to_geos_counter = CallCounter(cartopy.mpl_integration.patch,
                                       'path_to_geos')

    with path_to_geos_counter:
        x, y, z = sample_data((30, 60))
        cs = plt.contourf(x, y, z, 5, transform=ccrs.PlateCarree())
        n_geom = sum([len(c.get_paths()) for c in cs.collections])
        # NOTE(review): 'c' is the comprehension variable, which only leaks
        # into this scope under Python 2 -- under Python 3 'del cs, c' would
        # raise NameError.
        del cs, c
        plt.draw()

    # before the performance enhancement, the count would have been 2 * n_geom,
    # but should now be just n_geom
    assert path_to_geos_counter.count == n_geom, ('The given geometry was transfomed '
                                                  'too many times (expected: %s; got %s) - '
                                                  ' the caching is not working.' %
                                                  (n_geom, path_to_geos_counter.count))

    # Check the cache has an entry for each geometry.
    assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == n_geom

    # Check that the cache is empty again once we've dropped all references
    # to the source paths.
    plt.clf()
    gc.collect()
    assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == 0

    plt.close()
if __name__=='__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| gpl-3.0 |
tkchafin/mrbait | mrbait/mrbait_corefuncs_parallel.py | 1 | 18041 | #!/usr/bin/python
import sys
import sqlite3
import getopt
import Bio
import os
import time
from Bio import AlignIO
from mrbait import mrbait_menu
from mrbait import substring
from mrbait.substring import SubString
from functools import partial
from mrbait import manage_bait_db as m
from mrbait import alignment_tools as a
from mrbait import sequence_tools as s
from mrbait import misc_utils as utils
from mrbait import seq_graph as graph
from mrbait import aln_file_tools
from mrbait import vcf_tools
from mrbait import vsearch
from mrbait import gff3_parser as gff
from mrbait import blast as b
import subprocess
import pandas as pd
import numpy as np
import multiprocessing
"""
Parallel versions of some of the MrBait corefuncs.
Much thanks to SO user 'dano' for 2014 post on how to share lock in multiprocessing pool:
https://stackoverflow.com/questions/25557686/python-sharing-a-lock-between-processes
"""
#Function to load a GFF file into database
def loadGFF_parallel(conn, params):
    """Load a GFF file into the database using a pool of worker processes.

    The GFF is split into one chunk file per thread; each worker parses its
    chunk and inserts records, serializing writes with a shared lock that the
    pool initializer installs as a module global.
    """
    t = int(params.threads)

    #file chunker call
    file_list = aln_file_tools.generic_chunker(params.gff, t, params.workdir)

    #Initialize multiprocessing pool
    lock = multiprocessing.Lock()
    try:
        with multiprocessing.Pool(t, initializer=init, initargs=(lock,)) as pool:
            # BUG FIX: the chunk argument is supplied by pool.map() from
            # file_list and must not be pre-bound in partial().  The old code
            # referenced an undefined name 'chunk' here, so every call raised
            # NameError (silently caught below) and nothing was ever loaded.
            func = partial(loadGFF_worker, params.db)
            results = pool.map(func, file_list)
    except Exception as e:
        # NOTE(review): worker errors are swallowed here, matching the other
        # *_parallel loaders -- consider logging or re-raising.
        pool.close()
    pool.close()
    pool.join()

    #Remove chunkfiles
    aln_file_tools.removeChunks(params.workdir)
#worker function version of loadGFF
def loadGFF_worker(db, chunk):
    """Parse one GFF chunk file and insert its records into the database.

    Runs inside a multiprocessing pool; ``lock`` is a module global installed
    by the pool initializer and serializes database writes.
    """
    try:
        connection = sqlite3.connect(db)
        try:
            #For each GFF record in the chunk
            for record in gff.read_gff(chunk):
                #Skip any records that are missing the sequence ID, or coordinates
                if record.seqid == "NULL" or record.start == "NULL" or record.end == "NULL":
                    continue
                #Normalize coordinates so start <= end
                if record.start > record.end:
                    record.start, record.end = record.end, record.start
                #Get the alias, if it exists
                alias = record.getAlias() or "NULL"
                #NOTE: This function ONLY inserts GFFRecords where record.seqid matches an existing locus in the loci table
                # 'with lock' guarantees the lock is released even if the
                # insert raises (the old acquire/release pair could deadlock
                # the pool on error).
                with lock:
                    m.add_gff_record(connection, record.seqid, record.type.lower(), record.start, record.end, alias)
        finally:
            # Always release the connection, even on failure.
            connection.close()
    except Exception as e:
        # BUG FIX: Exception.message does not exist in Python 3; str(e)
        # preserves the intended error text and 'from e' keeps the chain.
        raise Exception(str(e)) from e
#Function to load a BED file into database
def loadBED_parallel(conn, params):
    """Load a BED file into the database using a pool of worker processes.

    The BED file is split into one chunk per thread; each worker inserts its
    rows, serializing writes with a shared lock installed as a module global
    by the pool initializer.
    """
    t = int(params.threads)
    #file chunker call
    file_list = aln_file_tools.generic_chunker(params.bed, t, params.workdir)
    #Initialize multiprocessing pool
    lock = multiprocessing.Lock()
    try:
        with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
            # pool.map supplies the chunk filename as the final argument.
            func = partial(loadBED_worker, params.db, params.bed_header)
            results = pool.map(func, file_list)
    except Exception as e:
        # NOTE(review): worker failures are silently swallowed here and
        # execution continues -- consider logging or re-raising.
        pool.close()
    pool.close()
    pool.join()
    #Remove chunkfiles
    aln_file_tools.removeChunks(params.workdir)
#worker function version of loadBED
def loadBED_worker(db, bed_header, chunk):
    """Insert the rows of one BED chunk file into the database.

    Runs inside a multiprocessing pool; ``lock`` is a module global installed
    by the pool initializer.  The first ``bed_header`` non-blank lines of the
    chunk are skipped as header lines.
    """
    try:
        connection = sqlite3.connect(db)
        try:
            with open(chunk) as f:
                count = 0
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    count += 1
                    if count <= bed_header:
                        continue
                    content = line.split()
                    #NOTE: This function ONLY inserts BEDRecords where record.seqid matches an existing locus in the loci table
                    # BUG FIX: removed a leftover debug print(content) that ran
                    # while holding the lock; 'with lock' also guarantees the
                    # lock is released if the insert raises.
                    with lock:
                        m.add_bed_record(connection, content[0], content[1], content[2])
        finally:
            connection.close()
    except Exception as e:
        # BUG FIX: Exception.message does not exist in Python 3.
        raise Exception(str(e)) from e
#Function to load BED file
def loadBED(conn, params):
    """Sequentially load a BED file into the database, then drop any records
    that do not fall within a known locus."""
    with open(params.bed) as bedfile:
        records_seen = 0
        for raw_line in bedfile:
            raw_line = raw_line.strip()
            # Blank lines are ignored and do not count toward the header.
            if not raw_line:
                continue
            records_seen += 1
            if records_seen <= params.bed_header:
                continue
            fields = raw_line.split()
            #NOTE: This function ONLY inserts BEDRecords where record.seqid matches an existing locus in the loci table
            m.add_bed_record(conn, fields[0], fields[1], fields[2])
    #remove BED records not falling within our loci
    m.validateBEDRecords(conn)
#Function to load a XMFA file into database
def loadXMFA_parallel(conn, params):
    """Load an XMFA alignment file into the database with a worker pool.

    Alignments are split into one chunk per thread; each worker computes a
    consensus per alignment and inserts it, serializing writes with a shared
    lock installed as a module global by the pool initializer.
    """
    t = int(params.threads)
    numLoci = aln_file_tools.countXMFA(params.xmfa)
    if numLoci < 10000:
        print("\t\t\tReading",numLoci,"alignments.")
    else:
        print("\t\t\tReading",numLoci,"alignments... This may take a while.")
    #file chunker call
    file_list = aln_file_tools.xmfa_chunker(params.xmfa, t, params.workdir)
    #Initialize multiprocessing pool
    lock = multiprocessing.Lock()
    try:
        with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
            # pool.map supplies the chunk filename as the final argument.
            func = partial(loadXMFA_worker, params.db, params.cov, params.minlen, params.thresh, params.mask, params.maf)
            results = pool.map(func, file_list)
    except Exception as e:
        # NOTE(review): worker failures are silently swallowed here.
        pool.close()
    pool.close()
    pool.join()
    #Remove chunkfiles
    aln_file_tools.removeChunks(params.workdir)
#worker function version of loadXMFA
def loadXMFA_worker(db, params_cov, params_minlen, params_thresh, params_mask, params_maf, chunk):
    """Parse one XMFA chunk, build a consensus per alignment, and insert it.

    Runs inside a multiprocessing pool; ``lock`` is a module global installed
    by the pool initializer.  Alignments below the coverage or length
    thresholds are skipped.
    """
    try:
        connection = sqlite3.connect(db)
        try:
            #Parse the XMFA chunk and populate the database
            for aln in AlignIO.parse(chunk, "mauve"):
                cov = len(aln)
                alen = aln.get_alignment_length()
                if cov < params_cov or alen < params_minlen:
                    continue
                #Add each locus to database
                locus = a.consensAlign(aln, threshold=params_thresh, mask=params_mask, maf=params_maf)
                # 'with lock' guarantees release even if the insert raises.
                with lock:
                    locid = m.add_locus_record(connection, cov, locus.conSequence, 1, "NULL")
        finally:
            connection.close()
    except Exception as e:
        # BUG FIX: Exception.message does not exist in Python 3.
        raise Exception(str(e)) from e
#Function to load LOCI file in parallel
def loadLOCI_parallel(conn, params):
	"""Load a .loci alignment file into the database in parallel.

	Format:
	multiprocessing pool.
	Master:
		splits file into n chunks
		creates multiprocessing pool
	Workers:
		read file chunk
		calculate consensus
		grab lock
		INSERT data to SQL database
		release lock
	"""
	t = int(params.threads)
	numLoci = aln_file_tools.countLoci(params.loci)
	if numLoci < 10000:
		print("\t\t\tReading",numLoci,"alignments.")
	else:
		print("\t\t\tReading",numLoci,"alignments... This may take a while.")
	#file chunker call
	file_list = aln_file_tools.loci_chunker(params.loci, t, params.workdir)
	# Lock is inherited by the children via the pool initializer (see init())
	lock = multiprocessing.Lock()
	try:
		# The Pool context manager terminates the pool on exit; the
		# original's duplicated pool.close() + join() were redundant.
		with multiprocessing.Pool(t, initializer=init, initargs=(lock,)) as pool:
			func = partial(loadLOCI_worker, params.db, params.cov, params.minlen, params.thresh, params.mask, params.maf)
			pool.map(func, file_list)
	except Exception as e:
		# Keep best-effort behavior, but don't swallow failures silently
		print("\t\t\tWARNING: LOCI worker pool failed:", e)
	finally:
		#Remove chunkfiles
		aln_file_tools.removeChunks(params.workdir)
#Function to load MAF file in parallel
def loadMAF_parallel(conn, params):
	"""Load a MAF alignment file into the database in parallel.

	The input is split into one chunk file per thread; each worker parses
	its chunk and inserts loci under a shared lock. Chunk files are always
	removed afterwards.
	"""
	t = int(params.threads)
	numLoci = aln_file_tools.countMAF(params.alignment)
	if numLoci < 10000:
		print("\t\t\tReading",numLoci,"alignments.")
	else:
		print("\t\t\tReading",numLoci,"alignments... This may take a while.")
	#file chunker call
	file_list = aln_file_tools.maf_chunker(params.alignment, t, params.workdir)
	# Lock is inherited by the children via the pool initializer (see init())
	lock = multiprocessing.Lock()
	try:
		# The Pool context manager terminates the pool on exit; the
		# original's duplicated pool.close() + join() were redundant.
		with multiprocessing.Pool(t, initializer=init, initargs=(lock,)) as pool:
			func = partial(loadMAF_worker, params.db, params.cov, params.minlen, params.thresh, params.mask, params.maf)
			pool.map(func, file_list)
	except Exception as e:
		# Keep best-effort behavior, but don't swallow failures silently
		print("\t\t\tWARNING: MAF worker pool failed:", e)
	finally:
		#Remove chunkfiles
		aln_file_tools.removeChunks(params.workdir)
# #First chunking, then parsing in parallel
# def loadVCF_parallel(conn, params):
# t = int(params.threads)
# #file chunker call
# file_list = vcf_tools.vcf_chunker(params.vcf, t, params.workdir)
#
# print("Files are:",file_list)
# #Initialize multiprocessing pool
# #if 'lock' not in globals():
# lock = multiprocessing.Lock()
# try:
# with multiprocessing.Pool(t,initializer=init, initargs=(lock,)) as pool:
# func = partial(loadVCF_worker, params.db, params.thresh)
# results = pool.map(func, file_list)
# except Exception as e:
# pool.close()
# pool.close()
# pool.join()
#
# #reset_lock()
# #Remove chunkfiles
# #aln_file_tools.removeChunks(params.workdir)
#Initialize a global lock. Doing it this way allows it to be inherited by the child processes properly
#Found on StackOverflow: https://stackoverflow.com/questions/25557686/python-sharing-a-lock-between-processes
#Thanks go to SO user dano
def init(shared_lock):
	"""Pool initializer: publish the given Lock as the module-global 'lock'.

	Workers access the shared lock through this global rather than a
	(pickled) argument, which is what makes it inheritable by children.
	"""
	global lock
	lock = shared_lock
#Function to reset lock
def reset_lock():
	"""Discard the module-global 'lock' installed by init().

	Raises NameError if no lock has been set — callers are expected to
	pair this with a prior init().
	"""
	global lock
	del lock
#NOTE: 'params' object can't be pickled, so its fields are passed individually.
#worker function version of loadMAF
def loadMAF_worker(db, params_cov, params_minlen, params_thresh, params_mask, params_maf, chunk):
	"""Parse one MAF chunk file and insert passing loci into the database.

	Uses the global 'lock' installed by init() to serialize database
	writes across pool workers.
	"""
	try:
		connection = sqlite3.connect(db)
		try:
			#Parse MAF file and populate database
			for aln in AlignIO.parse(chunk, "maf"):
				cov = len(aln)
				alen = aln.get_alignment_length()
				# Skip alignments with too little coverage or length
				if cov < params_cov or alen < params_minlen:
					continue
				#Add each locus to database
				locus = a.consensAlign(aln, threshold=params_thresh, mask=params_mask, maf=params_maf)
				lock.acquire()
				try:
					locid = m.add_locus_record(connection, cov, locus.conSequence, 1, "NULL")
				finally:
					# Always release, or a failed insert deadlocks the pool
					lock.release()
				#Extract variable positions for database
				#for var in locus.alnVars:
					#m.add_variant_record(connection, locid, var.position, var.value)
		finally:
			# Close even when parsing/insertion raises
			connection.close()
	except Exception as e:
		# BUGFIX: Exception.message was removed in Python 3; str(e) is portable
		raise Exception(str(e))
# #Function to load VCF variants file
# def loadVCF_worker(db, threshold, chunk):
# try:
# #Each worker opens unique connection to db
# connection = sqlite3.connect(db)
# #Lock DB and read loci, then release lock
# lock.acquire()
# loci = m.getPassedLoci(connection) #get DF of passed loci
# lock.release()
#
# chrom_lookup = loci.set_index('chrom')['id'].to_dict()
# loci.set_index('id', inplace=True)
#
# passed=0 #To track number of VCF records for which no locus exists
# failed=0
# for reclist in vcf_tools.read_vcf(chunk):
# rec_chrom = reclist[0].CHROM
# if rec_chrom in chrom_lookup:
# locid = chrom_lookup[rec_chrom]
# passed+=1
# #Grab DF record for the matching CHROM
# seq = loci.loc[locid,'consensus']
# #Get new consensus sequence given VCF records
# new_cons = vcf_tools.make_consensus_from_vcf(seq,rec_chrom,reclist, threshold)
# print(new_cons)
# #Update new consensus seq in db
# if len(new_cons) != len(seq): #Check length first
# print("\t\t\tWarning: New consensus sequence for locus %s (locid=<%s>) is the wrong length! Skipping."%(rec_chrom, locid))
# else:
# #Lock database for update, then release lock
# lock.acquire()
# m.updateConsensus(connection, locid, new_cons)
# lock.release()
# else:
# failed+=1
# if failed > 0:
# print("\t\t\tWARNING:%s/%s records in <%s> don't match any reference sequences"%(failed, failed+passed, chunk))
# #close connection
# connection.close()
# except Exception as e:
# raise Exception(e.message)
#Worker function for loadLOCI_parallel
def loadLOCI_worker(db, params_cov, params_minlen, params_thresh, params_mask, params_maf, chunk):
	"""Parse one .loci chunk file and insert passing loci into the database.

	Uses the global 'lock' installed by init() to serialize database
	writes across pool workers.
	"""
	try:
		connection = sqlite3.connect(db)
		try:
			#Parse LOCI file and populate database
			for aln in aln_file_tools.read_loci(chunk):
				cov = len(aln)
				alen = aln.get_alignment_length()
				#Skip if coverage or alignment length too short
				if cov < params_cov or alen < params_minlen:
					continue
				#Add each locus to database
				locus = a.consensAlign(aln, threshold=params_thresh, mask=params_mask, maf=params_maf)
				#Acquire lock, submit to Database
				lock.acquire()
				try:
					locid = m.add_locus_record(connection, cov, locus.conSequence, 1, "NULL")
				finally:
					# Always release, or a failed insert deadlocks the pool
					lock.release()
				#Extract variable positions for database
				#for var in locus.alnVars:
					#m.add_variant_record(connection, locid, var.position, var.value)
		finally:
			# Close even when parsing/insertion raises
			connection.close()
	except Exception as e:
		# BUGFIX: Exception.message was removed in Python 3; str(e) is portable
		raise Exception(str(e))
#Function to discover target regions using a sliding window through passedLoci
def targetDiscoverySlidingWindow_parallel(conn, params, loci):
	"""
	Discover target regions via sliding window, in parallel.

	Format:
	1. Write pandas DF to n chunk files
	2. List of chunk file names
	3. Pass 1 chunk file to each worker in a multiprocessing pool.
	Master:
		creates n chunk files
		creates multiprocessing pool
	Workers:
		read file chunk
		evaluate sliding windows
		grab lock
		INSERT data to SQL database
		release lock
	"""
	t = int(params.threads)
	loci_num = int(loci.shape[0])
	# Never create more chunks than there are loci
	chunks = loci_num if loci_num < t else t
	files = list()
	#Split loci DataFrame into chunks, and keep list of chunk files
	# (unused chunk_size/remainder bookkeeping from the original removed:
	# np.array_split handles uneven splits itself)
	for idx, df_chunk in enumerate(np.array_split(loci, chunks), start=1):
		chunk_file = params.workdir + "/." + str(idx) + ".chunk"
		df_chunk.to_csv(chunk_file, mode="w", index=False)
		files.append(chunk_file)
	# Lock is inherited by the children via the pool initializer (see init())
	lock = multiprocessing.Lock()
	# The Pool context manager terminates the pool on exit, making the
	# original's trailing pool.close()/pool.join() redundant.
	with multiprocessing.Pool(t, initializer=init, initargs=(lock,)) as pool:
		func = partial(targetDiscoverySlidingWindow_worker, params.db, params.win_shift, params.win_width, params.var_max, params.numN, params.numG, params.blen, params.flank_dist, params.target_all)
		pool.map(func, files)
	#Remove chunkfiles
	for item in os.listdir(params.workdir):
		if item.endswith(".chunk"):
			os.remove(os.path.join(params.workdir, item))
#Function to discover target regions using a sliding window through passedLoci
def targetDiscoverySlidingWindow_worker(db, shift, width, var, n, g, blen, flank_dist, target_all, chunk):
	"""Worker: scan loci from one CSV chunk file and record target regions.

	Each row of the chunk CSV is a locus; seq[1] appears to be the locus id
	and seq[2] its consensus sequence -- TODO confirm against the columns
	written by targetDiscoverySlidingWindow_parallel. A window passes when
	its counts of SNPs ('*'), Ns, and gaps ('-') are within var/n/g; passing
	windows extend the current candidate region, and a region of at least
	blen bases is submitted under the shared lock.

	NOTE(review): indentation reconstructed from a whitespace-mangled
	source; nesting of the window-failure branch should be verified.
	"""
	connection = sqlite3.connect(db)
	#print("process: reading hdf from",chunk)
	loci = pd.read_csv(chunk)
	for seq in loci.itertuples():
		# start/stop track the current candidate target region (0-based)
		start = 0
		stop = 0
		if target_all:
			#submit full locus as target, no windowing
			seq_norm = s.simplifySeq(seq[2])
			counts = s.seqCounterSimple(seq_norm)
			if counts['*'] <= var and counts['N'] <= n and counts['-'] <= g:
				target = seq[2]
				tr_counts = s.seqCounterSimple(seq_norm)
				n_mask = utils.n_lower_chars(seq[2])
				n_gc = s.gc_counts(seq[2])
				#NOTE: flank count set to number of variable sites in whole locus
				lock.acquire()
				m.add_region_record(connection, int(seq[1]), 0, len(seq[2]), seq[2], tr_counts, tr_counts, n_mask, n_gc)
				lock.release()
		else:
			generator = s.slidingWindowGenerator(seq[2], shift, width)
			for window_seq in generator():
				seq_norm = s.simplifySeq(window_seq[0])
				counts = s.seqCounterSimple(seq_norm)
				#If window passes filters, extend current bait region
				if counts['*'] <= var and counts['N'] <= n and counts['-'] <= g:
					stop = window_seq[2]
					#if this window passes BUT is the last window, evaluate it
					if stop == len(seq[2]):
						if (stop - start) >= blen:
							target = (seq[2])[start:stop]
							tr_counts = s.seqCounterSimple(s.simplifySeq(target))
							n_mask = utils.n_lower_chars(target)
							n_gc = s.gc_counts(target)
							#Submit target region plus flank composition to database
							flank_counts = s.getFlankCounts(seq[2], start, stop, flank_dist)
							lock.acquire()
							m.add_region_record(connection, int(seq[1]), start, stop, target, tr_counts, flank_counts, n_mask, n_gc)
							lock.release()
							#set start of next window to end of current TR
							generator.setI(stop)
				else:
					#If window fails, check if previous bait region passes to submit to DB
					if (stop - start) >= blen:
						target = (seq[2])[start:stop]
						tr_counts = s.seqCounterSimple(s.simplifySeq(target))
						n_mask = utils.n_lower_chars(target)
						n_gc = s.gc_counts(target)
						#Submit target region plus flank composition to database
						flank_counts = s.getFlankCounts(seq[2], start, stop, flank_dist)
						lock.acquire()
						m.add_region_record(connection, int(seq[1]), start, stop, target, tr_counts, flank_counts, n_mask, n_gc)
						lock.release()
						#set start of next window to end of current TR
						generator.setI(stop)
					#If bait fails, set start to start point of next window
					start = generator.getI()+shift
	connection.close()
#Function to get DataFrame of targets + flank regions, and calculate some stuff
def flankDistParser_parallel(conn, dist):
#Call manage_bait_db function to return DataFrame
targets = m.getTargetFlanks(conn, dist)
| gpl-3.0 |
iwelland/hop | doc/examples/hoptraj.py | 1 | 2558 | #!/usr/bin/env python
#---------------- EDIT JOB NAME -------------------
#$ -N hoptraj
#--------------------------------------------------
#$ -S /usr/bin/python
#$ -v PYTHONPATH=/home/oliver/Library/python-lib
#$ -v LD_LIBRARY_PATH=/opt/intel/cmkl/8.0/lib/32:/opt/intel/itc60/slib:/opt/intel/ipp41/ia32_itanium/sharedlib:/opt/intel/ipp41/ia32_itanium/sharedlib/linux32:/opt/intel/fc/9.0/lib:/opt/intel/cc/9.0/lib
#$ -r n
#$ -j y
# Using the current working directory is IMPORTANT with the default settings for Job()
#$ -cwd
#$ -m e
# $Id$
from staging.SunGridEngine import Job
#------------------------------------------------------------
# EDIT THE inputfiles AND outputfiles DICTIONARIES.
#------------------------------------------------------------
# record input and output files relative to top_dir = cwd
# Declare staged files relative to top_dir = cwd; 'density' is both read
# and rewritten in place. XXX placeholders presumably need editing per
# system -- see the template comments above.
job = Job(inputfiles=dict(density='analysis/water.pickle',
                          bulk='analysis/bulk.pickle',
                          psf='inp/XXX.psf',
                          dcd='trj/rmsfit_XXX.dcd'),
          outputfiles=dict(density='analysis/water.pickle',
                           hopdcd='trj/hoptraj.dcd',
                           hoppsf='trj/hoptraj.psf',
                           ))
#
#------------------------------------------------------------
job.stage()
# commands
import hop.utilities
# Disable interactive plotting -- this runs on a headless batch node
hop.utilities.matplotlib_interactive(False)
from hop.interactive import *
from hop.sitemap import Density
import hop.constants
# Load the solvent density and the bulk reference density from the pickles
density = Density(filename=job.input['density'])
bulk = Density(filename=job.input['bulk'])
# Fixing metadata -- only necessary if the dcd has a wrong header
# as for instance produced by catdcd.
##density.metadata['psf'] = job.input['psf'] # used in make_hoppingtraj()
##density.metadata['dcd'] = job.input['dcd']
##delta_ps = 0.5 # the time between two frames in ps (Hirsh's trajectories)
##delta_AKMA = delta_ps * hop.constants.get_conversion_factor('time','ps','AKMA')
##density.metadata['delta'] = delta_AKMA
##fixtrajectory = {'delta':density.metadata['delta']}
fixtrajectory = None
# Add the biggest bulk site at position 1 if we haven't done so already.
# This is important so we are making extra sure.
try:
    density.site_insert_bulk(bulk) # hack!
except ValueError,errmsg:
    # A ValueError here indicates a bulk site already exists -- benign
    print errmsg
    print "Bulk site not inserted because there already exists a bulk site --- that's good!"
density.save(job.output['density']) # also save modified metadata
del bulk
# Build the hopping trajectory (hopdcd/hoppsf outputs) from the site map
hops = make_hoppingtraj(density,job.output['hopdcd'],fixtrajectory=fixtrajectory)
# Copy outputs back and remove the staging directory
job.unstage()
job.cleanup()
| lgpl-3.0 |
tomsilver/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/contour.py | 69 | 42063 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as path
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as collections
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ContourLabeler:
    '''Mixin to provide labelling capability to ContourSet'''

    def clabel(self, *args, **kwargs):
        """
        call signature::

          clabel(cs, **kwargs)

        adds labels to line contours in *cs*, where *cs* is a
        :class:`~matplotlib.contour.ContourSet` object returned by
        contour.

        ::

          clabel(cs, v, **kwargs)

        only labels contours listed in *v*.

        Optional keyword arguments:

          *fontsize*:
            See http://matplotlib.sf.net/fonts.html

          *colors*:
            - if *None*, the color of each label matches the color of
              the corresponding contour

            - if one string color, e.g. *colors* = 'r' or *colors* =
              'red', all labels will be plotted in this color

            - if a tuple of matplotlib color args (string, float, rgb, etc),
              different labels will be plotted in different colors in the order
              specified

          *inline*:
            controls whether the underlying contour is removed or
            not. Default is *True*.

          *inline_spacing*:
            space in pixels to leave on each side of label when
            placing inline.  Defaults to 5.  This spacing will be
            exact for labels at locations where the contour is
            straight, less so for labels on curved contours.

          *fmt*:
            a format string for the label. Default is '%1.3f'
            Alternatively, this can be a dictionary matching contour
            levels with arbitrary strings to use for each contour level
            (i.e., fmt[level]=string)

          *manual*:
            if *True*, contour labels will be placed manually using
            mouse clicks.  Click the first button near a contour to
            add a label, click the second button (or potentially both
            mouse buttons at once) to finish adding labels.  The third
            button can be used to remove the last label added, but
            only if labels are not inline.  Alternatively, the keyboard
            can be used to select label locations (enter to end label
            placement, delete or backspace act like the third mouse button,
            and any other key will select a label location).

        .. plot:: mpl_examples/pylab_examples/contour_demo.py
        """

        """
        NOTES on how this all works:

        clabel basically takes the input arguments and uses them to
        add a list of "label specific" attributes to the ContourSet
        object. These attributes are all of the form label* and names
        should be fairly self explanatory.

        Once these attributes are set, clabel passes control to the
        labels method (case of automatic label placement) or
        BlockingContourLabeler (case of manual label placement).
        """

        fontsize = kwargs.get('fontsize', None)
        inline = kwargs.get('inline', 1)
        inline_spacing = kwargs.get('inline_spacing', 5)
        self.labelFmt = kwargs.get('fmt', '%1.3f')
        _colors = kwargs.get('colors', None)

        # Detect if manual selection is desired and remove from argument list
        self.labelManual=kwargs.get('manual',False)

        # No positional args: label every level; one arg: label only the
        # levels listed in it (must all exist in self.levels).
        if len(args) == 0:
            levels = self.levels
            indices = range(len(self.levels))
        elif len(args) == 1:
            levlabs = list(args[0])
            indices, levels = [], []
            for i, lev in enumerate(self.levels):
                if lev in levlabs:
                    indices.append(i)
                    levels.append(lev)
            if len(levels) < len(levlabs):
                msg = "Specified levels " + str(levlabs)
                msg += "\n don't match available levels "
                msg += str(self.levels)
                raise ValueError(msg)
        else:
            raise TypeError("Illegal arguments to clabel, see help(clabel)")
        self.labelLevelList = levels
        self.labelIndiceList = indices

        self.labelFontProps = font_manager.FontProperties()
        if fontsize == None:
            font_size = int(self.labelFontProps.get_size_in_points())
        else:
            if type(fontsize) not in [int, float, str]:
                raise TypeError("Font size must be an integer number.")
                # Can't it be floating point, as indicated in line above?
            else:
                if type(fontsize) == str:
                    font_size = int(self.labelFontProps.get_size_in_points())
                else:
                    self.labelFontProps.set_size(fontsize)
                    font_size = fontsize
        self.labelFontSizeList = [font_size] * len(levels)

        if _colors == None:
            # Labels inherit colors from the contour's own mappable
            self.labelMappable = self
            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
        else:
            # Explicit label colors: build a dedicated colormap/mappable
            cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
            self.labelCValueList = range(len(self.labelLevelList))
            self.labelMappable = cm.ScalarMappable(cmap = cmap,
                                                   norm = colors.NoNorm())

        #self.labelTexts = []   # Initialized in ContourSet.__init__
        #self.labelCValues = [] # same
        self.labelXYs = []

        if self.labelManual:
            print 'Select label locations manually using first mouse button.'
            print 'End manual selection with second mouse button.'
            if not inline:
                print 'Remove last label by clicking third mouse button.'

            blocking_contour_labeler = BlockingContourLabeler(self)
            blocking_contour_labeler(inline,inline_spacing)
        else:
            self.labels(inline,inline_spacing)

        # Hold on to some old attribute names.  These are deprecated and will
        # be removed in the near future (sometime after 2008-08-01), but
        # keeping for now for backwards compatibility
        self.cl = self.labelTexts
        self.cl_xy = self.labelXYs
        self.cl_cvalues = self.labelCValues

        self.labelTextsList =  cbook.silent_list('text.Text', self.labelTexts)
        return self.labelTextsList

    def print_label(self, linecontour,labelwidth):
        "if contours are too short, don't plot a label"
        lcsize = len(linecontour)
        if lcsize > 10 * labelwidth:
            return 1

        xmax = np.amax(linecontour[:,0])
        xmin = np.amin(linecontour[:,0])
        ymax = np.amax(linecontour[:,1])
        ymin = np.amin(linecontour[:,1])

        lw = labelwidth
        # Contour extent must comfortably exceed the label width in x or y
        if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
            return 1
        else:
            return 0

    def too_close(self, x,y, lw):
        "if there's a label already nearby, find a better place"
        # NOTE(review): the else-branch returns 0 on the very first distance
        # that is not too close, so only dist[0] is ever examined -- looks
        # like a latent bug inherited from upstream; confirm before changing.
        if self.labelXYs != []:
            dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2)
                    for loc in self.labelXYs]
            for d in dist:
                if d < 1.2*lw:
                    return 1
                else: return 0
        else: return 0

    def get_label_coords(self, distances, XX, YY, ysize, lw):
        """ labels are plotted at a location with the smallest
        dispersion of the contour from a straight line
        unless there's another label nearby, in which case
        the second best place on the contour is picked up
        if there's no good place a label is plotted at the
        beginning of the contour
        """
        hysize = int(ysize/2)
        adist = np.argsort(distances)

        for ind in adist:
            x, y = XX[ind][hysize], YY[ind][hysize]
            if self.too_close(x,y, lw):
                continue
            else:
                return x,y, ind

        # Fallback: flattest segment even though it is close to other labels
        ind = adist[0]
        x, y = XX[ind][hysize], YY[ind][hysize]
        return x,y, ind

    def get_label_width(self, lev, fmt, fsize):
        "get the width of the label in points"
        if cbook.is_string_like(lev):
            lw = (len(lev)) * fsize
        else:
            lw = (len(self.get_text(lev,fmt))) * fsize

        return lw

    def get_real_label_width( self, lev, fmt, fsize ):
        """
        This computes actual onscreen label width.
        This uses some black magic to determine onscreen extent of non-drawn
        label.  This magic may not be very robust.
        """
        # Find middle of axes
        xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 )

        # Temporarily create text object
        t = text.Text( xx[0], xx[1] )
        self.set_label_props( t, self.get_text(lev,fmt), 'k' )

        # Some black magic to get onscreen extent
        # NOTE: This will only work for already drawn figures, as the canvas
        # does not have a renderer otherwise.  This is the reason this function
        # can't be integrated into the rest of the code.
        bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)

        # difference in pixel extent of image
        lw = np.diff(bbox.corners()[0::2,0])[0]

        return lw

    def set_label_props(self, label, text, color):
        "set the label properties - color, fontsize, text"
        label.set_text(text)
        label.set_color(color)
        label.set_fontproperties(self.labelFontProps)
        label.set_clip_box(self.ax.bbox)

    def get_text(self, lev, fmt):
        "get the text of the label"
        if cbook.is_string_like(lev):
            return lev
        else:
            # fmt may be a dict mapping level -> label string
            if isinstance(fmt,dict):
                return fmt[lev]
            else:
                return fmt%lev

    def locate_label(self, linecontour, labelwidth):
        """find a good place to plot a label (relatively flat
        part of the contour) and the angle of rotation for the
        text object
        """

        nsize= len(linecontour)
        if labelwidth > 1:
            xsize = int(np.ceil(nsize/labelwidth))
        else:
            xsize = 1
        if xsize == 1:
            ysize = nsize
        else:
            ysize = labelwidth

        # Fold the contour into xsize rows of ysize points each
        XX = np.resize(linecontour[:,0],(xsize, ysize))
        YY = np.resize(linecontour[:,1],(xsize, ysize))
        #I might have fouled up the following:
        yfirst = YY[:,0].reshape(xsize, 1)
        ylast = YY[:,-1].reshape(xsize, 1)
        xfirst = XX[:,0].reshape(xsize, 1)
        xlast = XX[:,-1].reshape(xsize, 1)
        # Deviation of each point from the chord joining segment endpoints
        s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst)
        L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel()
        dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1)
        x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
        #print 'ind, x, y', ind, x, y

        # There must be a more efficient way...
        lc = [tuple(l) for l in linecontour]
        dind = lc.index((x,y))
        #print 'dind', dind
        #dind = list(linecontour).index((x,y))

        return x, y, dind

    def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ):
        """
        This function calculates the appropriate label rotation given
        the linecontour coordinates in screen units, the index of the
        label location and the label width.

        It will also break contour and calculate inlining if *lc* is
        not empty (lc defaults to the empty list if None).  *spacing*
        is the space around the label in pixels to leave empty.

        Do both of these tasks at once to avoid calling mlab.path_length
        multiple times, which is relatively costly.

        The method used here involves calculating the path length
        along the contour in pixel coordinates and then looking
        approximately label width / 2 away from central point to
        determine rotation and then to break contour if desired.
        """

        if lc is None: lc = []
        # Half the label width
        hlw = lw/2.0

        # Check if closed and, if so, rotate contour so label is at edge
        closed = mlab.is_closed_polygon(slc)
        if closed:
            slc = np.r_[ slc[ind:-1], slc[:ind+1] ]

            if len(lc): # Rotate lc also if not empty
                lc = np.r_[ lc[ind:-1], lc[:ind+1] ]

            ind = 0

        # Path length in pixel space
        pl = mlab.path_length(slc)
        pl = pl-pl[ind]

        # Use linear interpolation to get points around label
        xi = np.array( [ -hlw, hlw ] )
        if closed: # Look at end also for closed contours
            dp = np.array([pl[-1],0])
        else:
            dp = np.zeros_like(xi)

        ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi,
                                                    extrap=True )

        # get vector in pixel space coordinates from one point to other
        dd = np.diff( ll, axis=0 ).ravel()

        # Get angle of vector - must be calculated in pixel space for
        # text rotation to work correctly
        if np.all(dd==0): # Must deal with case of zero length label
            rotation = 0.0
        else:
            rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi

        # Fix angle so text is never upside-down
        if rotation > 90:
            rotation = rotation - 180.0
        if rotation < -90:
            rotation = 180.0 + rotation

        # Break contour if desired
        nlc = []
        if len(lc):
            # Expand range by spacing
            xi = dp + xi + np.array([-spacing,spacing])

            # Get indices near points of interest
            I = mlab.less_simple_linear_interpolation(
                pl, np.arange(len(pl)), xi, extrap=False )

            # If those indices aren't beyond contour edge, find x,y
            # (note Python 2 '<>' inequality operator)
            if (not np.isnan(I[0])) and int(I[0])<>I[0]:
                xy1 = mlab.less_simple_linear_interpolation(
                    pl, lc, [ xi[0] ] )

            if (not np.isnan(I[1])) and int(I[1])<>I[1]:
                xy2 = mlab.less_simple_linear_interpolation(
                    pl, lc, [ xi[1] ] )

            # Make integer
            I = [ np.floor(I[0]), np.ceil(I[1]) ]

            # Actually break contours
            if closed:
                # This will remove contour if shorter than label
                if np.all(~np.isnan(I)):
                    nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] )
            else:
                # These will remove pieces of contour if they have length zero
                if not np.isnan(I[0]):
                    nlc.append( np.r_[ lc[:I[0]+1], xy1 ] )
                if not np.isnan(I[1]):
                    nlc.append( np.r_[ xy2, lc[I[1]:] ] )

            # The current implementation removes contours completely
            # covered by labels.  Uncomment line below to keep
            # original contour if this is the preferred behavior.
            #if not len(nlc): nlc = [ lc ]

        return (rotation,nlc)

    def add_label(self,x,y,rotation,lev,cvalue):
        # Convert screen coordinates back to data coordinates for the text
        dx,dy = self.ax.transData.inverted().transform_point((x,y))
        t = text.Text(dx, dy, rotation = rotation,
                      horizontalalignment='center',
                      verticalalignment='center')

        color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha)

        _text = self.get_text(lev,self.labelFmt)
        self.set_label_props(t, _text, color)
        self.labelTexts.append(t)
        self.labelCValues.append(cvalue)
        self.labelXYs.append((x,y))

        # Add label to plot here - useful for manual mode label selection
        self.ax.add_artist(t)

    def pop_label(self,index=-1):
        '''Defaults to removing last label, but any index can be supplied'''
        self.labelCValues.pop(index)
        t = self.labelTexts.pop(index)
        t.remove()

    def labels(self, inline, inline_spacing):
        # Automatic label placement: one label per sufficiently long segment
        trans = self.ax.transData # A bit of shorthand

        for icon, lev, fsize, cvalue in zip(
            self.labelIndiceList, self.labelLevelList, self.labelFontSizeList,
            self.labelCValueList ):

            con = self.collections[icon]
            lw = self.get_label_width(lev, self.labelFmt, fsize)
            additions = []
            paths = con.get_paths()
            for segNum, linepath in enumerate(paths):
                lc = linepath.vertices # Line contour
                slc0 = trans.transform(lc) # Line contour in screen coords

                # For closed polygons, add extra point to avoid division by
                # zero in print_label and locate_label.  Other than these
                # functions, this is not necessary and should probably be
                # eventually removed.
                if mlab.is_closed_polygon( lc ):
                    slc = np.r_[ slc0, slc0[1:2,:] ]
                else:
                    slc = slc0

                if self.print_label(slc,lw): # Check if long enough for a label
                    x,y,ind  = self.locate_label(slc, lw)

                    if inline: lcarg = lc
                    else: lcarg = None
                    rotation,new=self.calc_label_rot_and_inline(
                        slc0, ind, lw, lcarg,
                        inline_spacing )

                    # Actually add the label
                    self.add_label(x,y,rotation,lev,cvalue)

                    # If inline, add new contours
                    if inline:
                        for n in new:
                            # Add path if not empty or single point
                            if len(n)>1: additions.append( path.Path(n) )
                else: # If not adding label, keep old path
                    additions.append(linepath)

            # After looping over all segments on a contour, remove old
            # paths and add new ones if inlining
            if inline:
                del paths[:]
                paths.extend(additions)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Create and store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in ContourSet.contour_doc.
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', 'solid')
self.alpha = kwargs.get('alpha', 1.0)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', True)
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
if self.extend is not 'neither':
raise ValueError('extend kwarg does not work yet with log scale')
else:
self.logscale = False
if self.origin is not None: assert(self.origin in
['lower', 'upper', 'image'])
if self.extent is not None: assert(len(self.extent) == 4)
if cmap is not None: assert(isinstance(cmap, colors.Colormap))
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
x, y, z = self._contour_args(*args) # also sets self.levels,
# self.layers
if self.colors is not None:
cmap = colors.ListedColormap(self.colors, N=len(self.layers))
if self.filled:
self.collections = cbook.silent_list('collections.PolyCollection')
else:
self.collections = cbook.silent_list('collections.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
self._process_colors()
_mask = ma.getmask(z)
if _mask is ma.nomask:
_mask = None
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
C = _cntr.Cntr(x, y, z.filled(), _mask)
lowers = self._levels[:-1]
uppers = self._levels[1:]
for level, level_upper in zip(lowers, uppers):
nlist = C.trace(level, level_upper, points = 0,
nchunk = self.nchunk)
col = collections.PolyCollection(nlist,
antialiaseds = (self.antialiased,),
edgecolors= 'none',
alpha=self.alpha)
self.ax.add_collection(col)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
C = _cntr.Cntr(x, y, z.filled(), _mask)
for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
nlist = C.trace(level, points = 0)
col = collections.LineCollection(nlist,
linewidths = width,
linestyle = lstyle,
alpha=self.alpha)
if level < 0.0 and self.monochrome:
ls = mpl.rcParams['contour.negative_linestyle']
col.set_linestyle(ls)
col.set_label('_nolegend_')
self.ax.add_collection(col, False)
self.collections.append(col)
self.changed() # set the colors
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0,y0), (x1,y1)])
self.ax.autoscale_view()
    def changed(self):
        """
        Re-apply colors and alpha to all contour artists and labels.

        Invoked through the cm.ScalarMappable change machinery whenever
        the colormap, norm, or color limits of this set are modified.
        """
        # One rgba tuple per contour level, wrapped in a 1-tuple as
        # expected by Collection.set_color.
        tcolors = [ (tuple(rgba),) for rgba in
                        self.to_rgba(self.cvalues, alpha=self.alpha)]
        self.tcolors = tcolors
        for color, collection in zip(tcolors, self.collections):
            collection.set_alpha(self.alpha)
            collection.set_color(color)
        # Keep any existing contour labels in sync with the new mapping.
        for label, cv in zip(self.labelTexts, self.labelCValues):
            label.set_alpha(self.alpha)
            label.set_color(self.labelMappable.to_rgba(cv))
        # add label colors
        cm.ScalarMappable.changed(self)
    def _autolev(self, z, N):
        '''
        Select contour levels to span the data.

        We need two more levels for filled contours than for
        line contours, because for the latter we need to specify
        the lower and upper boundary of each range. For example,
        a single contour boundary, say at z = 0, requires only
        one contour line, but two filled regions, and therefore
        three levels to provide boundaries for both regions.
        '''
        if self.locator is None:
            # Choose a default locator matching the data scaling.
            if self.logscale:
                self.locator = ticker.LogLocator()
            else:
                self.locator = ticker.MaxNLocator(N+1)
        self.locator.create_dummy_axis()
        zmax = self.zmax
        zmin = self.zmin
        self.locator.set_bounds(zmin, zmax)
        lev = self.locator()
        # Nudge the outermost levels outward so data exactly at zmin/zmax
        # still falls strictly inside the outer intervals.
        zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin)
        if zmax >= lev[-1]:
            lev[-1] += zmargin
        if zmin <= lev[0]:
            if self.logscale:
                lev[0] = 0.99 * zmin
            else:
                lev[0] -= zmargin
        self._auto = True
        if self.filled:
            return lev
        # Line contours: drop the bounding levels (see docstring).
        return lev[1:-1]
def _initialize_x_y(self, z):
'''
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
'''
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0,x1,y0,y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0,x1,y0,y1 = (0, Nx, 0, Ny)
else:
x0,x1,y0,y1 = self.extent
dx = float(x1 - x0)/Nx
dy = float(y1 - y0)/Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x,y)
    def _check_xyz(self, args):
        '''
        For functions like contour, check that the dimensions
        of the input arrays match; if x and y are 1D, convert
        them to 2D using meshgrid.

        Possible change: I think we should make and use an ArgumentError
        Exception class (here and elsewhere).
        '''
        # We can strip away the x and y units
        x = self.ax.convert_xunits( args[0] )
        y = self.ax.convert_yunits( args[1] )
        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        # z stays a masked array so invalid regions survive the checks.
        z = ma.asarray(args[2], dtype=np.float64)
        if z.ndim != 2:
            raise TypeError("Input z must be a 2D array.")
        else: Ny, Nx = z.shape
        # Already 2D and matching z: nothing more to do.
        if x.shape == z.shape and y.shape == z.shape:
            return x,y,z
        if x.ndim != 1 or y.ndim != 1:
            raise TypeError("Inputs x and y must be 1D or 2D.")
        nx, = x.shape
        ny, = y.shape
        if nx != Nx or ny != Ny:
            raise TypeError("Length of x must be number of columns in z,\n" +
                            "and length of y must be number of rows.")
        # Expand the 1D coordinate vectors into full 2D grids.
        x,y = np.meshgrid(x,y)
        return x,y,z
    def _contour_args(self, *args):
        """
        Parse the positional contour arguments (z), (x, y, z) plus an
        optional trailing levels spec (int N or explicit sequence).

        Sets self.zmin/zmax, self.levels, self._levels (levels extended
        for the 'extend' kwarg), self.layers, and self.vmin/vmax, and
        returns the (x, y, z) arrays to be contoured.
        """
        if self.filled: fn = 'contourf'
        else: fn = 'contour'
        Nargs = len(args)
        if Nargs <= 2:
            # Only z (plus maybe a levels spec): synthesize x, y.
            z = ma.asarray(args[0], dtype=np.float64)
            x, y = self._initialize_x_y(z)
        elif Nargs <=4:
            x,y,z = self._check_xyz(args[:3])
        else:
            raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
        self.zmax = ma.maximum(z)
        self.zmin = ma.minimum(z)
        if self.logscale and self.zmin <= 0:
            # Log scaling cannot represent non-positive values.
            z = ma.masked_where(z <= 0, z)
            warnings.warn('Log scale: values of z <=0 have been masked')
            self.zmin = z.min()
        self._auto = False
        if self.levels is None:
            if Nargs == 1 or Nargs == 3:
                lev = self._autolev(z, 7)
            else: # 2 or 4 args
                level_arg = args[-1]
                try:
                    if type(level_arg) == int:
                        lev = self._autolev(z, level_arg)
                    else:
                        lev = np.asarray(level_arg).astype(np.float64)
                except:
                    raise TypeError(
                        "Last %s arg must give levels; see help(%s)" % (fn,fn))
            if self.filled and len(lev) < 2:
                raise ValueError("Filled contours require at least 2 levels.")
            # Workaround for cntr.c bug wrt masked interior regions:
            #if filled:
            # z = ma.masked_array(z.filled(-1e38))
            # It's not clear this is any better than the original bug.
            self.levels = lev
        #if self._auto and self.extend in ('both', 'min', 'max'):
        # raise TypeError("Auto level selection is inconsistent "
        # + "with use of 'extend' kwarg")
        # _levels: levels padded with sentinel bounds for 'extend' regions.
        self._levels = list(self.levels)
        if self.extend in ('both', 'min'):
            self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
        if self.extend in ('both', 'max'):
            self._levels.append(max(self.levels[-1],self.zmax) + 1)
        self._levels = np.asarray(self._levels)
        self.vmin = np.amin(self.levels) # alternative would be self.layers
        self.vmax = np.amax(self.levels)
        # Extrapolate vmin/vmax past the extreme levels so extended
        # regions map to the colormap's end colors.
        if self.extend in ('both', 'min'):
            self.vmin = 2 * self.levels[0] - self.levels[1]
        if self.extend in ('both', 'max'):
            self.vmax = 2 * self.levels[-1] - self.levels[-2]
        self.layers = self._levels # contour: a line is a thin layer
        if self.filled:
            # Filled: one layer per interval, identified by its midpoint.
            self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
            if self.extend in ('both', 'min'):
                self.layers[0] = 0.5 * (self.vmin + self._levels[1])
            if self.extend in ('both', 'max'):
                self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
        return (x, y, z)
    def _process_colors(self):
        """
        Color argument processing for contouring.

        Note that we base the color mapping on the contour levels,
        not on the actual range of the Z values. This means we
        don't have to worry about bad values in Z, and we always have
        the full dynamic range available for the selected levels.

        The color is based on the midpoint of the layer, except for
        an extended end layers.
        """
        self.monochrome = self.cmap.monochrome
        if self.colors is not None:
            # Explicit color list: map layers to integer indices into the
            # ListedColormap built from self.colors; extended end layers
            # get indices just outside [0, len(layers)).
            i0, i1 = 0, len(self.layers)
            if self.extend in ('both', 'min'):
                i0 = -1
            if self.extend in ('both', 'max'):
                i1 = i1 + 1
            self.cvalues = range(i0, i1)
            self.set_norm(colors.NoNorm())
        else:
            self.cvalues = self.layers
        if not self.norm.scaled():
            self.set_clim(self.vmin, self.vmax)
        if self.extend in ('both', 'max', 'min'):
            # Let out-of-range values map to the under/over colors
            # instead of being clipped to the colormap ends.
            self.norm.clip = False
        self.set_array(self.layers)
        # self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] *Nlev
else:
if cbook.iterable(linewidths) and len(linewidths) < Nlev:
linewidths = list(linewidths) * int(np.ceil(Nlev/len(linewidths)))
elif not cbook.iterable(linewidths) and type(linewidths) in [int, float]:
linewidths = [linewidths] * Nlev
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
elif cbook.iterable(linestyles) and len(linestyles) <= Nlev:
tlinestyles = list(linestyles) * int(np.ceil(Nlev/len(linestyles)))
return tlinestyles
def get_alpha(self):
'''returns alpha to be applied to all ContourSet artists'''
return self.alpha
def set_alpha(self, alpha):
'''sets alpha for all ContourSet artists'''
self.alpha = alpha
self.changed()
contour_doc = """
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the Matlab
(TM) version in that it does not draw the polygon edges,
because the contouring engine yields simply connected regions
with branch cuts. To draw the edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (*x*, *y*) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
contour *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*
::
contourf(..., V)
fill the (len(*V*)-1) regions between the values in *V*
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X*, *Y*, and *Z* must be arrays with the same dimensions.
*Z* may be a masked array, but filled contouring may not
handle internal masked regions correctly.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.ContourSet` object.
Optional keyword arguments:
*colors*: [ None | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ None | Colormap ]
A cm :class:`~matplotlib.cm.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*origin*: [ None | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ None | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ None | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.cm.Colormap.set_under` and
:meth:`matplotlib.cm.Colormap.set_over` methods.
contour-only keyword arguments:
*linewidths*: [ None | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If *linestyles* is *None*, the 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
contourf-only keyword arguments:
*antialiased*: [ True | False ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
**Example:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
def find_nearest_contour( self, x, y, indices=None, pixel=True ):
"""
Finds contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
self, x, y, indices=None, pixel=True )
Optional keyword arguments::
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
if indices==None:
indices = range(len(self.levels))
dmin = 1e10
conmin = None
segmin = None
xmin = None
ymin = None
for icon in indices:
con = self.collections[icon]
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = self.ax.transData.transform(lc)
ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2
d = min( ds )
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = mpl.mlab.find( ds == d )[0]
xmin = lc[imin,0]
ymin = lc[imin,1]
return (conmin,segmin,imin,xmin,ymin,dmin)
| gpl-3.0 |
tku137/JPKay | tests/test_JPKForce.py | 1 | 1936 | # coding=utf-8
import pytest
import numpy.testing as npt
from numpy import array
import pandas.util.testing as pdt
import pandas as pd
from JPKay.core.data_structures import CellHesion
# noinspection PyShadowingNames,PyPep8Naming
@pytest.mark.usefixtures("sample_force_file")
class TestJpkForce:
    """Tests for CellHesion force-file loading and conversion.

    Uses the ``sample_force_file`` pytest fixture (defined elsewhere); the
    expected values below are hard-coded for that specific sample file.
    """

    def test_load_encoded_data_segment(self, sample_force_file):
        # Raw (still-encoded) vDeflection/height columns of one segment.
        sample = CellHesion(sample_force_file)
        segment = 'retract'
        vDef, height = sample.load_encoded_data_segment(segment)
        assert vDef[0] == -4454604
        assert height[0] == 468876141
        assert vDef.shape == (1000, 1)
        assert height.shape == (1000, 1)

    def test_load_data(self, sample_force_file):
        # Fully converted frame: 4 segments x 2 channels = 8 columns.
        sample = CellHesion(sample_force_file)
        assert sample.data.shape == (1000, 8)
        iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]
        index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])
        data = array([-2.98158446715e-11, 3.90831266155e-05])
        df = pd.DataFrame(columns=index)
        df.loc[0, 'retract'] = data
        pdt.assert_almost_equal(sample.data.loc[0], df.loc[0])

    def test_convert_data(self, sample_force_file):
        # Encoded integer -> physical-unit conversion for both channels.
        sample = CellHesion(sample_force_file)
        conv_1 = sample.convert_data('vDeflection', array(-4454604))
        conv_2 = sample.convert_data('height', array(468876141))
        npt.assert_almost_equal(conv_1, array(-2.98158446715e-11), decimal=20)
        npt.assert_almost_equal(conv_2, array(3.90831266155e-05), decimal=15)

    def test_construct_df(self, sample_force_file):
        # Empty frame skeleton with the expected (segment, channel) columns.
        sample = CellHesion(sample_force_file)
        df = sample.construct_df()
        iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]
        index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])
        pdt.assert_frame_equal(df, pd.DataFrame(columns=index))
| mit |
J535D165/recordlinkage | recordlinkage/config_init.py | 1 | 1304 | import recordlinkage.config as cf
from recordlinkage.config import (get_default_val, is_bool, is_callable,
is_instance_factory, is_int,
is_one_of_factory, is_text
)
# Doc text (runtime string values) attached to the registered options below.
pairs_type_doc = """
: str
    Specify the format how record pairs are stored. By default, record
    pairs generated by the toolkit are returned in a
    pandas.MultiIndex object ('multiindex' option).
    Valid values: 'multiindex'
"""

classification_return_type_doc = """
: str
    The format of the classification result. The value 'index' returns the
    classification result as a pandas.MultiIndex. The MultiIndex contains
    the predicted matching record pairs. The value 'series' returns a
    pandas.Series with zeros (distinct) and ones (matches). The argument
    value 'array' will return a numpy.ndarray with zeros and ones.
"""

# Register the options under their section prefixes; each validator
# restricts the values accepted at assignment time.
with cf.config_prefix('indexing'):
    cf.register_option(
        'pairs',
        'multiindex',
        pairs_type_doc,
        validator=is_one_of_factory(['multiindex']))

with cf.config_prefix('classification'):
    cf.register_option(
        'return_type',
        'index',
        classification_return_type_doc,
        validator=is_one_of_factory(['index', 'series', 'array']))
mitschabaude/nanopores | scripts/pughpore/diffusivity/diff_bulk.py | 1 | 1313 | import numpy as np
import os
from math import sinh, acosh
import nanopores as nano
import nanopores.geometries.pughpore as pughpore
import nanopores.tools.fields as fields
from matplotlib import pyplot as plt
geop = nano.Params(pughpore.params)
fields.set_dir(os.path.expanduser("~") + "/Dropbox/nanopores/fields")
r = geop.rMolecule
eps = 1e-8
x = np.linspace(r+eps, 30, 100)
def Cp1(l):
    # First-order wall correction: 1 - (9/16) * (r/l), with the module-level
    # molecule radius r.
    return 1. - (9. / 16.) * (r / l)
def Cp(h):
    # Higher-order wall correction expanded in powers of u = r/h
    # (uses the module-level molecule radius r).
    u = r / h
    return 1. - 9. / 16. * u + 1. / 8. * u ** 3 - 45. / 256. * u ** 4 - 1 / 16. * u ** 5
def Cn(l):
    # Series-based correction (99-term truncation) — presumably for motion
    # normal to the wall, given the Dn usage below; uses module-level r.
    a = acosh(l / r)
    total = 0.
    for k in range(1, 100):
        k = float(k)
        coeff = k * (k + 1) / (2 * k - 1) / (2 * k + 3)
        numer = 2 * sinh((2 * k + 1) * a) + (2 * k + 1) * sinh(2 * a)
        denom = 4 * (sinh((k + .5) * a)) ** 2 - (2 * k + 1) ** 2 * (sinh(a)) ** 2
        total += coeff * (numer / denom - 1)
    return 1. / ((4. / 3.) * sinh(a) * total)
# Evaluate both corrections over the distance grid: Cn point-by-point
# (scalar math functions), Cp vectorized over the numpy array.
Dn = np.array([Cn(xx) for xx in x])
Dt = Cp(x)
def matrix(d):
    # Embed the three entries of d on the diagonal of a 3x3 matrix.
    out = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
    for i in range(3):
        out[i][i] = d[i]
    return out
# Bundle distances with per-point diagonal diffusivity tensors
# (Dn, Dt, Dt) for storage in the fields cache.
data = dict(x=list(x), D=map(matrix, zip(Dn, Dt, Dt)))

# NOTE(review): Python 2 code — `print` statement below, and `map` must
# return a list for **data to be meaningful when saved.
if not fields.exists("pugh_diff_bulk", rMolecule=r):
    print "SAVING..."
    fields.save_fields("pugh_diff_bulk", dict(rMolecule=r), **data)
    fields.update()

plt.plot(x, Dt, ".-", label=r"$D_t$")
plt.plot(x, Cp1(x), ".-", label=r"$D_t$ simple")
# NOTE(review): this curve plots Dn but reuses the $D_t$ label — probably
# meant $D_n$; confirm before changing the figure output.
plt.plot(x, Dn, ".-", label=r"$D_t$")
plt.legend(loc="lower right")
#plt.show()
yunque/PyML | RandomWalk.py | 1 | 2464 | #!/usr/bin/env python
"""
RANDOM WALK
Generate a random walk sequence of times and frequencies, which are used to
synthesize sinusoidal sweeps of random durations between random frequencies.
1. generate intervals randomly until they add up to 10.
2. 3*10=30 >> divide
"""
from __future__ import division
import numpy as np
from pylab import add
import matplotlib.pyplot as plt
def randomWalk(f0):
    """Draw a random sequence of interval lengths and frequencies.

    Parameters
    ----------
    f0 : number
        Initial frequency (e.g. 110/220/440).

    Returns
    -------
    times : list
        Starts with 0, followed by random interval lengths (numpy scalars
        drawn around mean 1) whose running sum just exceeds T = 10; the
        final entry is truncated to an int.
    freqs : list
        One frequency per entry of ``times``; each drawn from a normal
        distribution centered on its predecessor (sd = 10) and truncated
        to int.
    """
    # Create a list of time intervals between 0 and T, each drawn from a normal distribution
    # Time starts at 0
    times = [0]
    # Initial frequency (pass 110/220/440)
    freqs = [f0]
    T = 10
    acc = 0
    i = 0
    while acc <= T:
        # time distributed around mean=1 (size-1 array; abs() keeps it positive)
        randTime = np.random.normal(1,1,1) #float("{0:.2f}".format(x))
        times.extend(abs(randTime))
        # freq distributed around previous freq
        randFreq = int(np.random.normal(freqs[-1],10,1))
        freqs.extend([randFreq])
        acc += times[-1]
        i += 1
    # Adjust final time point
    times[-1] = int(times[-1])
    # # Don't need the last frequency, as we only want one freq per interval
    # freqs = freqs[:-1]
    # # DBG
    # numIntervals = len(times)
    # print numIntervals
    return times, freqs
def mySweep(f1, f2, dur):
    """Synthesize a phase-continuous sigmoid sweep from f1 to f2.

    The time step scales with ``dur`` so the output always has fs = 16000
    samples regardless of duration.
    """
    fs = 16000
    sample_rate = 1 / fs  # 6.25e-5 (true division via __future__ import)
    t_change = 0.5  # center (seconds) of the sigmoid transition
    t = np.arange(0, dur, sample_rate * dur)
    # Logistic blend from ~0 to ~1 around t_change.
    blend = 1. / (1 + np.exp(-6. * (t - t_change)))
    inst_freq = f1 * (1 - blend) + f2 * blend
    # Accumulate per-sample frequency steps into a phase correction so the
    # sweep stays continuous in phase.
    freq_steps = np.concatenate((np.zeros(1), 2 * np.pi * (inst_freq[:-1] - inst_freq[1:])))
    phase_fix = np.cumsum(t * freq_steps)
    return np.sin(2 * np.pi * inst_freq * t + phase_fix)
if __name__ == "__main__":
f0 = 10
times, freqs = randomWalk(f0)
# times = [0,0.5,1,2]
# freqs = [10,20,10,20]
print times
print freqs
randomSweep = []
for i in xrange(len(times)-1):
# t1 = sum(times[:i])
# t2 = sum(times[:i+1]) # - ( ) # <<<
# print t2-t1
print i
print freqs[i], freqs[i+1], ( sum(times[:i+2]) - sum(times[:i+1]) )
f1 = freqs[i]
f2 = freqs[i+1]
dur = ( sum(times[:i+2]) - sum(times[:i+1]) )
print "dur = ",dur
# sweeped = mySweep(freqs[i], freqs[i+1], (times[i+1]-times[i]) )
sweeped = mySweep(f1,f2,dur)
randomSweep.extend(sweeped)
print "np.shape(randomSweep) = ",np.shape(randomSweep)
plt.plot(randomSweep)
plt.show()
# NOTE(review): this trailing block duplicates the __main__ loop above but
# executes at import time, and `randFreq` is not defined at module scope —
# importing this module raises NameError here.  Presumably leftover from a
# refactor; confirm and remove or fold into the __main__ guard.
times, freqs = randomWalk(randFreq)
randomSweep = []
for i in xrange(len(times)-1):
    f1 = freqs[i]
    f2 = freqs[i+1]
    dur = ( sum(times[:i+2]) - sum(times[:i+1]) )
    sweeped = mySweep(f1,f2,dur)
    randomSweep.extend(sweeped)
| gpl-2.0 |
bloyl/mne-python | tutorials/stats-sensor-space/10_background_stats.py | 10 | 29158 | # -*- coding: utf-8 -*-
"""
.. _disc-stats:
=====================
Statistical inference
=====================
Here we will briefly cover multiple concepts of inferential statistics in an
introductory manner, and demonstrate how to use some MNE statistical functions.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa, analysis:ignore
import mne
from mne.stats import (ttest_1samp_no_p, bonferroni_correction, fdr_correction,
permutation_t_test, permutation_cluster_1samp_test)
print(__doc__)
###############################################################################
# Hypothesis testing
# ------------------
# Null hypothesis
# ^^^^^^^^^^^^^^^
# From `Wikipedia <https://en.wikipedia.org/wiki/Null_hypothesis>`__:
#
# In inferential statistics, a general statement or default position that
# there is no relationship between two measured phenomena, or no
# association among groups.
#
# We typically want to reject a **null hypothesis** with
# some probability (e.g., p < 0.05). This probability is also called the
# significance level :math:`\alpha`.
# To think about what this means, let's follow the illustrative example from
# :footcite:`RidgwayEtAl2012` and construct a toy dataset consisting of a
# 40 x 40 square with a "signal" present in the center with white noise added
# and a Gaussian smoothing kernel applied.
# Simulation parameters for the 40x40 toy dataset.
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3  # sigma for the "hat" method
n_permutations = 'all'  # run an exact test
n_src = width * width

# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(2)  # fixed seed -> reproducible figures
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel (applied separably: rows, then columns)
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
    for ri in range(X.shape[1]):
        X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
    for ci in range(X.shape[2]):
        X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# The data averaged over all subjects looks like this:
# Grand-average image of the simulated data (mean over subjects).
fig, ax = plt.subplots()
ax.imshow(X.mean(0), cmap='inferno')
ax.set(xticks=[], yticks=[], title="Data averaged over subjects")
###############################################################################
# In this case, a null hypothesis we could test for each voxel is:
#
# There is no difference between the mean value and zero
# (:math:`H_0 \colon \mu = 0`).
#
# The alternative hypothesis, then, is that the voxel has a non-zero mean
# (:math:`H_1 \colon \mu \neq 0`).
# This is a *two-tailed* test because the mean could be less than
# or greater than zero, whereas a *one-tailed* test would test only one of
# these possibilities, i.e. :math:`H_1 \colon \mu \geq 0` or
# :math:`H_1 \colon \mu \leq 0`.
#
# .. note:: Here we will refer to each spatial location as a "voxel".
# In general, though, it could be any sort of data value,
# including cortical vertex at a specific time, pixel in a
# time-frequency decomposition, etc.
#
# Parametric tests
# ^^^^^^^^^^^^^^^^
# Let's start with a **paired t-test**, which is a standard test
# for differences in paired samples. Mathematically, it is equivalent
# to a 1-sample t-test on the difference between the samples in each condition.
# The paired t-test is **parametric**
# because it assumes that the underlying sample distribution is Gaussian, and
# is only valid in this case. This happens to be satisfied by our toy dataset,
# but is not always satisfied for neuroimaging data.
#
# In the context of our toy dataset, which has many voxels
# (:math:`40 \cdot 40 = 1600`), applying the paired t-test is called a
# *mass-univariate* approach as it treats each voxel independently.
# Mass-univariate one-sample t-test (one test per voxel).  Results are
# collected in parallel lists (titles/ts/ps/mccs) consumed by plot_t_p.
titles = ['t']
out = stats.ttest_1samp(X, 0, axis=0)
ts = [out[0]]
ps = [out[1]]
mccs = [False]  # these are not multiple-comparisons corrected
def plot_t_p(t, p, title, mcc, axes=None):
    """Plot a 3D t-value surface next to a 2D -log10(p) map.

    Reads the module-level globals ``width`` and ``n_subjects``.

    Parameters
    ----------
    t, p : array-like
        Flattened t-values and p-values; reshaped to (width, width).
    title : str
        Title for the figure (or for the t-axes when ``axes`` is given).
    mcc : bool
        Whether the p-values are multiple-comparisons corrected; if True
        the title is drawn bold.
    axes : list of two axes | None
        If None, a new figure with one 3D and one 2D axes is created and
        shown; otherwise the provided [3D, 2D] axes are drawn into.
    """
    if axes is None:
        fig = plt.figure(figsize=(6, 3))
        axes = [fig.add_subplot(121, projection='3d'), fig.add_subplot(122)]
        show = True
    else:
        show = False
    # Color/tick limits derived from the t-distribution quantiles at
    # p = 0.1 and p = 0.001.
    p_lims = [0.1, 0.001]
    t_lims = -stats.distributions.t.ppf(p_lims, n_subjects - 1)
    p_lims = [-np.log10(p) for p in p_lims]
    # t plot
    x, y = np.mgrid[0:width, 0:width]
    surf = axes[0].plot_surface(x, y, np.reshape(t, (width, width)),
                                rstride=1, cstride=1, linewidth=0,
                                vmin=t_lims[0], vmax=t_lims[1], cmap='viridis')
    axes[0].set(xticks=[], yticks=[], zticks=[],
                xlim=[0, width - 1], ylim=[0, width - 1])
    axes[0].view_init(30, 15)
    cbar = plt.colorbar(ax=axes[0], shrink=0.75, orientation='horizontal',
                        fraction=0.1, pad=0.025, mappable=surf)
    cbar.set_ticks(t_lims)
    cbar.set_ticklabels(['%0.1f' % t_lim for t_lim in t_lims])
    cbar.set_label('t-value')
    cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
    if not show:
        axes[0].set(title=title)
        if mcc:
            axes[0].title.set_weight('bold')
    # p plot (p clipped at 1e-5 to keep -log10 finite)
    use_p = -np.log10(np.reshape(np.maximum(p, 1e-5), (width, width)))
    img = axes[1].imshow(use_p, cmap='inferno', vmin=p_lims[0], vmax=p_lims[1],
                         interpolation='nearest')
    axes[1].set(xticks=[], yticks=[])
    cbar = plt.colorbar(ax=axes[1], shrink=0.75, orientation='horizontal',
                        fraction=0.1, pad=0.025, mappable=img)
    cbar.set_ticks(p_lims)
    cbar.set_ticklabels(['%0.1f' % p_lim for p_lim in p_lims])
    cbar.set_label(r'$-\log_{10}(p)$')
    cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
    if show:
        text = fig.suptitle(title)
        if mcc:
            text.set_weight('bold')
        plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0)
        mne.viz.utils.plt_show()
# Show the uncorrected parametric t-test result.
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# The "hat" technique regularizes the variance values used in the t-test
# calculation :footcite:`RidgwayEtAl2012` to compensate for implausibly small
# variances.
# "Hat"-adjusted t-test: regularized variance, then two-tailed p-values
# from the t survival function.
ts.append(ttest_1samp_no_p(X, sigma=sigma))
ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2)
titles.append(r'$\mathrm{t_{hat}}$')
mccs.append(False)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Non-parametric tests
# ^^^^^^^^^^^^^^^^^^^^
# Instead of assuming an underlying Gaussian distribution, we could instead
# use a **non-parametric resampling** method. In the case of a paired t-test
# between two conditions A and B, which is mathematically equivalent to a
# one-sample t-test between the difference in the conditions A-B, under the
# null hypothesis we have the principle of **exchangeability**. This means
# that, if the null is true, we can exchange conditions and not change
# the distribution of the test statistic.
#
# When using a paired t-test, exchangeability thus means that we can flip the
# signs of the difference between A and B. Therefore, we can construct the
# **null distribution** values for each voxel by taking random subsets of
# samples (subjects), flipping the sign of their difference, and recording the
# absolute value of the resulting statistic (we record the absolute value
# because we conduct a two-tailed test). The absolute value of the statistic
# evaluated on the veridical data can then be compared to this distribution,
# and the p-value is simply the proportion of null distribution values that
# are smaller.
#
# .. warning:: In the case of a true one-sample t-test, i.e. analyzing a single
# condition rather than the difference between two conditions,
# it is not clear where/how exchangeability applies; see
# `this FieldTrip discussion <ft_exch_>`_.
#
# In the case where ``n_permutations`` is large enough (or "all") so
# that the complete set of unique resampling exchanges can be done
# (which is :math:`2^{N_{samp}}-1` for a one-tailed and
# :math:`2^{N_{samp}-1}-1` for a two-tailed test, not counting the
# veridical distribution), instead of randomly exchanging conditions
# the null is formed from using all possible exchanges. This is known
# as a permutation test (or exact test).
# Here we have to do a bit of gymnastics to get our function to do
# a permutation test without correcting for multiple comparisons:
# NOTE: reshapes X in place (subjects x voxels); later cells rely on this
# flattened shape.
X.shape = (n_subjects, n_src)  # flatten the array for simplicity
titles.append('Permutation')
ts.append(np.zeros(width * width))
ps.append(np.zeros(width * width))
mccs.append(False)
# One independent exact permutation test per voxel (no MCC yet).
for ii in range(n_src):
    ts[-1][ii], ps[-1][ii] = permutation_t_test(X[:, [ii]], verbose=False)[:2]
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Multiple comparisons
# --------------------
# So far, we have done no correction for multiple comparisons. This is
# potentially problematic for these data because there are
# :math:`40 \cdot 40 = 1600` tests being performed. If we use a threshold
# p < 0.05 for each individual test, we would expect many voxels to be declared
# significant even if there were no true effect. In other words, we would make
# many **type I errors** (adapted from `here <errors_>`_):
#
# .. rst-class:: skinnytable
#
# +----------+--------+------------------+------------------+
# | | Null hypothesis |
# | +------------------+------------------+
# | | True | False |
# +==========+========+==================+==================+
# | | | Type I error | Correct |
# | | Yes | False positive | True positive |
# + Reject +--------+------------------+------------------+
# | | | Correct | Type II error |
# | | No | True Negative | False negative |
# +----------+--------+------------------+------------------+
#
# To see why, consider a standard :math:`\alpha = 0.05`.
# For a single test, our probability of making a type I error is 0.05.
# The probability of making at least one type I error in
# :math:`N_{\mathrm{test}}` independent tests is then given by
# :math:`1 - (1 - \alpha)^{N_{\mathrm{test}}}`:
# Probability of at least one type I error across N independent tests:
# 1 - (1 - alpha)^N.
N = np.arange(1, 80)
alpha = 0.05
p_type_I = 1 - (1 - alpha) ** N
fig, ax = plt.subplots(figsize=(4, 3))
ax.scatter(N, p_type_I, 3)
ax.set(xlim=N[[0, -1]], ylim=[0, 1], xlabel=r'$N_{\mathrm{test}}$',
       ylabel=u'Probability of at least\none type I error')
ax.grid(True)
fig.tight_layout()
fig.show()
###############################################################################
# To combat this problem, several methods exist. Typically these
# provide control over either one of the following two measures:
#
# 1. `Familywise error rate (FWER) <fwer_>`_
# The probability of making one or more type I errors:
#
# .. math::
# \mathrm{P}(N_{\mathrm{type\ I}} >= 1 \mid H_0)
#
# 2. `False discovery rate (FDR) <fdr_>`_
# The expected proportion of rejected null hypotheses that are
# actually true:
#
# .. math::
# \mathrm{E}(\frac{N_{\mathrm{type\ I}}}{N_{\mathrm{reject}}}
# \mid N_{\mathrm{reject}} > 0) \cdot
# \mathrm{P}(N_{\mathrm{reject}} > 0 \mid H_0)
#
# We cover some techniques that control FWER and FDR below.
#
# Bonferroni correction
# ^^^^^^^^^^^^^^^^^^^^^
# Perhaps the simplest way to deal with multiple comparisons, `Bonferroni
# correction <https://en.wikipedia.org/wiki/Bonferroni_correction>`__
# conservatively multiplies the p-values by the number of comparisons to
# control the FWER.
titles.append('Bonferroni')
ts.append(ts[-1])  # t-values are unchanged; only the p-values are corrected
ps.append(bonferroni_correction(ps[0])[1])  # element [1] holds the corrected p-values
mccs.append(True)  # this method does control for multiple comparisons
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# False discovery rate (FDR) correction
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Typically FDR is performed with the Benjamini-Hochberg procedure, which
# is less restrictive than Bonferroni correction for large numbers of
# comparisons (fewer type II errors), but provides less strict control of type
# I errors.
titles.append('FDR')
ts.append(ts[-1])  # t-values are unchanged; only the p-values are corrected
ps.append(fdr_correction(ps[0])[1])  # element [1] holds the corrected p-values
mccs.append(True)  # FDR correction accounts for multiple comparisons
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Non-parametric resampling test with a maximum statistic
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# **Non-parametric resampling tests** can also be used to correct for multiple
# comparisons. In its simplest form, we again do permutations using
# exchangeability under the null hypothesis, but this time we take the
# *maximum statistic across all voxels* in each permutation to form the
# null distribution. The p-value for each voxel from the veridical data
# is then given by the proportion of null distribution values
# that were smaller.
#
# This method has two important features:
#
# 1. It controls FWER.
# 2. It is non-parametric. Even though our initial test statistic
# (here a 1-sample t-test) is parametric, the null
# distribution for the null hypothesis rejection (the mean value across
# subjects is indistinguishable from zero) is obtained by permutations.
# This means that it makes no assumptions of Gaussianity
# (which do hold for this example, but do not in general for some types
# of processed neuroimaging data).
titles.append(r'$\mathbf{Perm_{max}}$')
out = permutation_t_test(X, verbose=False)[:2]  # first two returns: (t-values, p-values)
ts.append(out[0])
ps.append(out[1])
mccs.append(True)  # max-statistic permutation controls FWER
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Clustering
# ^^^^^^^^^^
# Each of the aforementioned multiple comparisons corrections have the
# disadvantage of not fully incorporating the correlation structure of the
# data, namely that points close to one another (e.g., in space or time) tend
# to be correlated. However, by defining the adjacency/neighbor
# structure in our data, we can use **clustering** to compensate.
#
# To use this, we need to rethink our null hypothesis. Instead
# of thinking about a null hypothesis about means per voxel (with one
# independent test per voxel), we consider a null hypothesis about sizes
# of clusters in our data, which could be stated like:
#
# The distribution of spatial cluster sizes observed in two experimental
# conditions are drawn from the same probability distribution.
#
# Here we only have a single condition and we contrast to zero, which can
# be thought of as:
#
# The distribution of spatial cluster sizes is independent of the sign
# of the data.
#
# In this case, we again do permutations with a maximum statistic, but, under
# each permutation, we:
#
# 1. Compute the test statistic for each voxel individually.
# 2. Threshold the test statistic values.
# 3. Cluster voxels that exceed this threshold (with the same sign) based on
# adjacency.
# 4. Retain the size of the largest cluster (measured, e.g., by a simple voxel
# count, or by the sum of voxel t-values within the cluster) to build the
# null distribution.
#
# After doing these permutations, the cluster sizes in our veridical data
# are compared to this null distribution. The p-value associated with each
# cluster is again given by the proportion of smaller null distribution
# values. This can then be subjected to a standard p-value threshold
# (e.g., p < 0.05) to reject the null hypothesis (i.e., find an effect of
# interest).
#
# This reframing to consider *cluster sizes* rather than *individual means*
# maintains the advantages of the standard non-parametric permutation
# test -- namely controlling FWER and making no assumptions of parametric
# data distribution.
# Critically, though, it also accounts for the correlation structure in the
# data -- which in this toy case is spatial but in general can be
# multidimensional (e.g., spatio-temporal) -- because the null distribution
# will be derived from data in a way that preserves these correlations.
#
# .. sidebar:: Effect size
#
# For a nice description of how to compute the effect size obtained
# in a cluster test, see this
# `FieldTrip mailing list discussion <ft_cluster_effect_size_>`_.
#
# However, there is a drawback. If a cluster significantly deviates from
# the null, no further inference on the cluster (e.g., peak location) can be
# made, as the entire cluster as a whole is used to reject the null.
# Moreover, because the test statistic concerns the full data, the null
# hypothesis (and our rejection of it) refers to the structure of the full
# data. For more information, see also the comprehensive
# `FieldTrip tutorial <ft_cluster_>`_.
#
# Defining the adjacency matrix
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First we need to define our adjacency (sometimes called "neighbors") matrix.
# This is a square array (or sparse matrix) of shape ``(n_src, n_src)`` that
# contains zeros and ones to define which spatial points are neighbors, i.e.,
# which voxels are adjacent to each other. In our case this
# is quite simple, as our data are aligned on a rectangular grid.
#
# Let's pretend that our data were smaller -- a 3 x 3 grid. Thinking about
# each voxel as being connected to the other voxels it touches, we would
# need a 9 x 9 adjacency matrix. The first row of this matrix contains the
# voxels in the flattened data that the first voxel touches. Since it touches
# the second element in the first row and the first element in the second row
# (and is also a neighbor to itself), this would be::
#
# [1, 1, 0, 1, 0, 0, 0, 0, 0]
#
# :mod:`sklearn.feature_extraction` provides a convenient function for this:
from sklearn.feature_extraction.image import grid_to_graph  # noqa: E402
# Adjacency for a 3 x 3 grid: 9 voxels, hence a 9 x 9 (sparse) matrix
mini_adjacency = grid_to_graph(3, 3).toarray()
assert mini_adjacency.shape == (9, 9)
print(mini_adjacency[0])  # neighbors of the first (corner) voxel
###############################################################################
# In general the adjacency between voxels can be more complex, such as
# those between sensors in 3D space, or time-varying activation at brain
# vertices on a cortical surface. MNE provides several convenience functions
# for computing adjacency matrices (see the
# :ref:`Statistics API <api_reference_statistics>`).
#
# Standard clustering
# ~~~~~~~~~~~~~~~~~~~
# Here, since our data are on a grid, we can use ``adjacency=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.
titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold, adjacency=None,
    n_permutations=n_permutations, out_type='mask')
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
    p_clust[cl] = p  # broadcast each cluster's p-value onto its member voxels
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)  # cluster-based permutation controls FWER
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
# variances :footcite:`RidgwayEtAl2012`:
titles.append(r'$\mathbf{C_{hat}}$')
# Same cluster test but with the "hat" variance-adjusted t statistic
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold, adjacency=None, out_type='mask',
    n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
p_hat = np.ones((width, width))
for cl, p in zip(clusters, p_values):
    p_hat[cl] = p  # broadcast each cluster's p-value onto its member voxels
ts.append(t_hat)
ps.append(p_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# .. _tfce_example:
#
# Threshold-free cluster enhancement (TFCE)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# TFCE eliminates the free parameter initial ``threshold`` value that
# determines which points are included in clustering by approximating
# a continuous integration across possible threshold values with a standard
# `Riemann sum <https://en.wikipedia.org/wiki/Riemann_sum>`__
# :footcite:`SmithNichols2009`.
# This requires giving a starting threshold ``start`` and a step
# size ``step``, which in MNE is supplied as a dict.
# The smaller the ``step`` and closer to 0 the ``start`` value,
# the better the approximation, but the longer it takes.
#
# A significant advantage of TFCE is that, rather than modifying the
# statistical null hypothesis under test (from one about individual voxels
# to one about the distribution of clusters in the data), it modifies the *data
# under test* while still controlling for multiple comparisons.
# The statistical test is then done at the level of individual voxels rather
# than clusters. This allows for evaluation of each point
# independently for significance rather than only as cluster groups.
titles.append(r'$\mathbf{C_{TFCE}}$')
threshold_tfce = dict(start=0, step=0.2)  # a dict threshold triggers TFCE mode
t_tfce, _, p_tfce, H0 = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold_tfce, adjacency=None,
    n_permutations=n_permutations, out_type='mask')
ts.append(t_tfce)
ps.append(p_tfce)  # TFCE yields one p-value per voxel (no cluster masks)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# We can also combine TFCE and the "hat" correction:
titles.append(r'$\mathbf{C_{hat,TFCE}}$')
# TFCE combined with the "hat" variance-adjusted statistic
t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold_tfce, adjacency=None, out_type='mask',
    n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
ts.append(t_tfce_hat)
ps.append(p_tfce_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Visualize and compare methods
# -----------------------------
# Let's take a look at these statistics. The top row shows each test statistic,
# and the bottom shows p-values for various statistical tests, with the ones
# with proper control over FWER or FDR with bold titles.
fig = plt.figure(facecolor='w', figsize=(14, 3))
assert len(ts) == len(titles) == len(ps)
for ii in range(len(ts)):
    # Top row: 3D t-statistic surface; bottom row: corresponding p-value map
    ax = [fig.add_subplot(2, 10, ii + 1, projection='3d'),
          fig.add_subplot(2, 10, 11 + ii)]
    plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax)
fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1)
plt.show()
###############################################################################
# The first three columns show the parametric and non-parametric statistics
# that are not corrected for multiple comparisons:
#
# - Mass univariate **t-tests** result in jagged edges.
# - **"Hat" variance correction** of the t-tests produces less peaky edges,
# correcting for sharpness in the statistic driven by low-variance voxels.
# - **Non-parametric resampling tests** are very similar to t-tests. This is to
# be expected: the data are drawn from a Gaussian distribution, and thus
# satisfy parametric assumptions.
#
# The next three columns show multiple comparison corrections of the
# mass univariate tests (parametric and non-parametric). These
# too conservatively correct for multiple comparisons because neighboring
# voxels in our data are correlated:
#
# - **Bonferroni correction** eliminates any significant activity.
# - **FDR correction** is less conservative than Bonferroni.
# - A **permutation test with a maximum statistic** also eliminates any
# significant activity.
#
# The final four columns show the non-parametric cluster-based permutation
# tests with a maximum statistic:
#
# - **Standard clustering** identifies the correct region. However, the whole
# area must be declared significant, so no peak analysis can be done.
# Also, the peak is broad.
# - **Clustering with "hat" variance adjustment** tightens the estimate of
# significant activity.
# - **Clustering with TFCE** allows analyzing each significant point
# independently, but still has a broadened estimate.
# - **Clustering with TFCE and "hat" variance adjustment** tightens the area
# declared significant (again FWER corrected).
#
# Statistical functions in MNE
# ----------------------------
# The complete listing of statistical functions provided by MNE are in
# the :ref:`Statistics API list <api_reference_statistics>`, but we will give
# a brief overview here.
#
# MNE provides several convenience parametric testing functions that can be
# used in conjunction with the non-parametric clustering methods. However,
# the set of functions we provide is not meant to be exhaustive.
#
# If the univariate statistical contrast of interest is not listed here
# (e.g., interaction term in an unbalanced ANOVA), consider checking out the
# :mod:`statsmodels` package. It offers many functions for computing
# statistical contrasts, e.g., :func:`statsmodels.stats.anova.anova_lm`.
# To use these functions in clustering:
#
# 1. Determine which test statistic (e.g., t-value, F-value) you would use
# in a univariate context to compute your contrast of interest. In other
# words, if there were only a single output such as reaction times, what
# test statistic might you compute on the data?
# 2. Wrap the call to that function within a function that takes an input of
# the same shape that is expected by your clustering function,
# and returns an array of the same shape without the "samples" dimension
# (e.g., :func:`mne.stats.permutation_cluster_1samp_test` takes an array
# of shape ``(n_samples, p, q)`` and returns an array of shape ``(p, q)``).
# 3. Pass this wrapped function to the ``stat_fun`` argument to the clustering
# function.
# 4. Set an appropriate ``threshold`` value (float or dict) based on the
# values your statistical contrast function returns.
#
# Parametric methods provided by MNE
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# - :func:`mne.stats.ttest_1samp_no_p`
# Paired t-test, optionally with hat adjustment.
# This is used by default for contrast enhancement in paired cluster tests.
#
# - :func:`mne.stats.f_oneway`
# One-way ANOVA for independent samples.
# This can be used to compute various F-contrasts. It is used by default
# for contrast enhancement in non-paired cluster tests.
#
# - :func:`mne.stats.f_mway_rm`
# M-way ANOVA for repeated measures and balanced designs.
# This returns F-statistics and p-values. The associated helper function
# :func:`mne.stats.f_threshold_mway_rm` can be used to determine the
# F-threshold at a given significance level.
#
# - :func:`mne.stats.linear_regression`
# Compute ordinary least square regressions on multiple targets, e.g.,
# sensors, time points across trials (samples).
# For each regressor it returns the beta value, t-statistic, and
# uncorrected p-value. While it can be used as a test, it is
# particularly useful to compute weighted averages or deal with
# continuous predictors.
#
# Non-parametric methods
# ^^^^^^^^^^^^^^^^^^^^^^
#
# - :func:`mne.stats.permutation_cluster_test`
# Unpaired contrasts with clustering.
#
# - :func:`mne.stats.spatio_temporal_cluster_test`
# Unpaired contrasts with spatio-temporal clustering.
#
# - :func:`mne.stats.permutation_t_test`
# Paired contrast with no clustering.
#
# - :func:`mne.stats.permutation_cluster_1samp_test`
# Paired contrasts with clustering.
#
# - :func:`mne.stats.spatio_temporal_cluster_1samp_test`
# Paired contrasts with spatio-temporal clustering.
#
# .. warning:: In most MNE functions, data has shape
# ``(..., n_space, n_time)``, where the spatial dimension can
# be e.g. sensors or source vertices. But for our spatio-temporal
# clustering functions, the spatial dimensions need to be **last**
# for computational efficiency reasons. For example, for
# :func:`mne.stats.spatio_temporal_cluster_1samp_test`, ``X``
# needs to be of shape ``(n_samples, n_time, n_space)``. You can
# use :func:`numpy.transpose` to transpose axes if necessary.
#
# References
# ----------
# .. footbibliography::
#
# .. include:: ../../links.inc
| bsd-3-clause |
vorwerkc/pymatgen | pymatgen/io/cp2k/outputs.py | 5 | 55516 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the Cp2k output parser along with a few other functions for parsing cp2k-related
outputs.
"""
import glob
import logging
import os
import re
import warnings
import numpy as np
import pandas as pd
from monty.io import zopen
from monty.json import jsanitize
from monty.re import regrep
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Orbital, Spin
from pymatgen.electronic_structure.dos import CompleteDos, Dos, add_densities
from pymatgen.io.cp2k.sets import Cp2kInput
from pymatgen.io.cp2k.utils import _postprocessor, natural_keys
from pymatgen.io.xyz import XYZ
__author__ = "Nicholas Winner"
__version__ = "0.3"
__status__ = "Development"
logger = logging.getLogger(__name__)
_hartree_to_ev_ = 2.72113838565563e01
_static_run_names_ = [
"ENERGY",
"ENERGY_FORCE",
"WAVEFUNCTION_OPTIMIZATION",
"WFN_OPT",
]
class Cp2kOutput:
"""
Class for parsing output file from CP2K. The CP2K output file is very flexible in the way that it is returned.
This class will automatically parse parameters that should always be present, but other parsing features may be
called depending on the run type.
"""
    def __init__(self, filename: str, verbose: bool = False, auto_load: bool = False):
        """
        Initialize the Cp2kOutput object.
        Args:
            filename: (str) Name of the CP2K output file to parse
            verbose: (bool) Whether or not to parse with verbosity (will parse lots of data that may not be useful)
            auto_load (bool): Whether or not to automatically load basic info like energies and structures.
        """
        # IO Info
        self.filename = filename
        self.dir = os.path.dirname(filename)
        self.filenames = {}  # paths of auxiliary files found beside the output
        self.parse_files()
        self.data = {}  # central store for all parsed quantities
        # Material properties/results
        self.input = None
        self.initial_structure = None
        self.lattice = None
        self.final_structure = None
        self.composition = None
        self.efermi = None
        self.vbm = None
        self.cbm = None
        self.band_gap = None
        self.structures = []
        self.ionic_steps = []
        # parse the basic run parameters always (order matters: the input file
        # name comes from parse_cp2k_params, which parse_input then uses)
        self.parse_cp2k_params()
        self.parse_input()  # parse the input file
        self.parse_global_params()  # Always present, parse the global parameters, most important is what run type
        self.parse_dft_params()  # Present so long as a DFT calculation was performed
        self.parse_scf_params()
        self.parse_atomic_kind_info()
        # Auto-load will load the most crucial data into the data attribute
        if auto_load:
            self.ran_successfully()  # Only if job completed. No info about convergence etc.
            self.convergence()  # Checks to see if job converged
            self.parse_initial_structure()  # Get the initial structure by parsing lattice and then parsing coords
            self.parse_structures()  # collect all structures from the run
            self.parse_energies()  # get total energy for each ionic step
            self.parse_forces()  # get forces on all atoms (in order), if available
            self.parse_stresses()  # get stress tensor and total stress at each ionic step, if available
            self.parse_ionic_steps()  # collect energy, forces, and total stress into ionic steps variable
            self.parse_dos()
            self.parse_mo_eigenvalues()  # Get the eigenvalues of the MOs (for finding gaps, VBM, CBM)
            self.parse_homo_lumo()  # Get the HOMO LUMO gap as printed after the mo eigenvalues
            self.parse_timing()  # Get timing info (includes total CPU time consumed, but also much more)
        # TODO: Is this the best way to implement? Should there just be the option to select each individually?
        if verbose:
            self.parse_scf_opt()
            self.parse_opt_steps()
            self.parse_total_numbers()
            self.parse_mulliken()
            self.parse_hirshfeld()
@property
def cp2k_version(self):
"""
The cp2k version used in the calculation
"""
return self.data.get("cp2k_version", None)
@property
def completed(self):
"""
Did the calculation complete
"""
c = self.data.get("completed", False)
if c:
return c[0][0]
return c
@property
def num_warnings(self):
"""
How many warnings showed up during the run
"""
return self.data.get("num_warnings", 0)
@property
def run_type(self):
"""
What type of run (Energy, MD, etc.) was performed
"""
return self.data.get("global").get("Run_type")
@property
def project_name(self):
"""
What project name was used for this calculation
"""
return self.data.get("global").get("project_name")
@property
def spin_polarized(self):
"""
Was the calculation spin polarized
"""
if ("UKS" or "UNRESTRICTED_KOHN_SHAM" or "LSD" or "SPIN_POLARIZED") in self.data["dft"].values():
return True
return False
@property
def is_metal(self):
"""
Was a band gap found? i.e. is it a metal
"""
if self.band_gap is None:
return True
if self.band_gap <= 0:
return True
return False
def parse_files(self):
"""
Identify files present in the directory with the cp2k output file. Looks for trajectories, dos, and cubes
"""
pdos = glob.glob(os.path.join(self.dir, "*pdos*"))
self.filenames["PDOS"] = []
self.filenames["LDOS"] = []
for p in pdos:
if p.split("/")[-1].__contains__("list"):
self.filenames["LDOS"].append(p)
else:
self.filenames["PDOS"].append(p)
self.filenames["trajectory"] = glob.glob(os.path.join(self.dir, "*pos*.xyz*"))
self.filenames["forces"] = glob.glob(os.path.join(self.dir, "*frc*.xyz*"))
self.filenames["stress"] = glob.glob(os.path.join(self.dir, "*stress*"))
self.filenames["cell"] = glob.glob(os.path.join(self.dir, "*.cell*"))
self.filenames["electron_density"] = glob.glob(os.path.join(self.dir, "*ELECTRON_DENSITY*.cube*"))
self.filenames["spin_density"] = glob.glob(os.path.join(self.dir, "*SPIN_DENSITY*.cube*"))
self.filenames["v_hartree"] = glob.glob(os.path.join(self.dir, "*hartree*.cube*"))
self.filenames["v_hartree"].sort(key=natural_keys)
restart = glob.glob(os.path.join(self.dir, "*restart*"))
self.filenames["restart.bak"] = []
for r in restart:
if r.split("/")[-1].__contains__("bak"):
self.filenames["restart.bak"].append(r)
else:
self.filenames["restart"] = r
wfn = glob.glob(os.path.join(self.dir, "*wfn*"))
self.filenames["wfn.bak"] = []
for w in wfn:
if w.split("/")[-1].__contains__("bak"):
self.filenames["wfn.bak"].append(w)
else:
self.filenames["wfn"] = w
    def parse_structures(self, trajectory_file=None, lattice_file=None):
        """
        Parses the structures from a cp2k calculation. Static calculations simply use the initial structure.
        For calculations with ionic motion, the function will look for the appropriate trajectory and lattice
        files based on naming convention. If no file is given, and no file is found, it is assumed
        that the lattice/structure remained constant, and the initial lattice/structure is used.
        Cp2k does not output the trajectory in the main output file by default, so non static calculations have to
        reference the trajectory file.

        Args:
            trajectory_file (str): path to a *pos*.xyz trajectory file; auto-detected when None.
            lattice_file (str): path to a .cell lattice file; auto-detected when None.
        """
        if lattice_file is None:
            if len(self.filenames["cell"]) == 0:
                # No cell file: lattice assumed constant, taken from the main output
                lattice = self.parse_cell_params()
            elif len(self.filenames["cell"]) == 1:
                # Columns 2:11 of each .cell row hold the flattened 3x3 lattice matrix
                latfile = np.loadtxt(self.filenames["cell"][0])
                lattice = (
                    [l[2:11].reshape(3, 3) for l in latfile] if len(latfile.shape) > 1 else latfile[2:11].reshape(3, 3)
                )
                # NOTE(review): when latfile is 1-D, ``lattice`` is an ndarray and
                # .append would raise — presumably only multi-frame files occur here; confirm.
                lattice.append(lattice[-1])  # TODO is this always needed? from re-eval at minimum
            else:
                raise FileNotFoundError("Unable to automatically determine lattice file. More than one exist.")
        else:
            latfile = np.loadtxt(lattice_file)
            lattice = [l[2:].reshape(3, 3) for l in latfile]
        if trajectory_file is None:
            if len(self.filenames["trajectory"]) == 0:
                # Static run: the only structure is the initial one
                self.structures = []
                self.structures.append(self.parse_initial_structure())
                self.final_structure = self.structures[-1]
            elif len(self.filenames["trajectory"]) == 1:
                # Build one Structure per trajectory frame, pairing frames with lattices
                mols = XYZ.from_file(self.filenames["trajectory"][0]).all_molecules
                self.structures = []
                for m, l in zip(mols, lattice):
                    self.structures.append(
                        Structure(
                            lattice=l,
                            coords=[s.coords for s in m.sites],
                            species=[s.specie for s in m.sites],
                            coords_are_cartesian=True,
                        )
                    )
                self.final_structure = self.structures[-1]
            else:
                raise FileNotFoundError("Unable to automatically determine trajectory file. More than one exist.")
        else:
            mols = XYZ.from_file(trajectory_file).all_molecules
            self.structures = []
            for m, l in zip(mols, lattice):
                self.structures.append(
                    Structure(
                        lattice=l,
                        coords=[s.coords for s in m.sites],
                        species=[s.specie for s in m.sites],
                        coords_are_cartesian=True,
                    )
                )
            self.final_structure = self.structures[-1]
        # Propagate the overall charge from the initial structure to the final one
        self.final_structure.set_charge(self.initial_structure.charge)
    def parse_initial_structure(self):
        """
        Parse the initial structure from the main cp2k output file.

        Returns:
            Structure: the initial structure, with a "ghost" site property
            marking atomic kinds that have no pseudopotential assigned.
        """
        pattern = re.compile(r"- Atoms:\s+(\d+)")
        patterns = {"num_atoms": pattern}
        self.read_pattern(
            patterns=patterns,
            reverse=False,
            terminate_on_match=True,
            postprocess=int,
        )
        # Scan forward to the coordinate table and read one row per atom.
        # NOTE(review): loops forever if the header line is absent — assumes
        # CP2K always prints this table; confirm.
        coord_table = []
        with zopen(self.filename, "rt") as f:
            while True:
                line = f.readline()
                if "Atom Kind Element X Y Z Z(eff) Mass" in line:
                    f.readline()  # skip the separator line beneath the header
                    for i in range(self.data["num_atoms"][0][0]):
                        coord_table.append(f.readline().split())
                    break
        lattice = self.parse_cell_params()
        # Kinds whose pseudopotential is "NONE" are ghost atoms (basis only)
        gs = {}
        for k in self.data["atomic_kind_info"].values():
            if k["pseudo_potential"].upper() == "NONE":
                gs[k["kind_number"]] = True
            else:
                gs[k["kind_number"]] = False
        self.initial_structure = Structure(
            lattice[0],
            species=[i[2] for i in coord_table],
            coords=[[float(i[4]), float(i[5]), float(i[6])] for i in coord_table],
            coords_are_cartesian=True,
            site_properties={"ghost": [gs.get(int(i[1])) for i in coord_table]},
        )
        # Overall charge comes from the input file's DFT section (default 0)
        self.initial_structure.set_charge(self.input["FORCE_EVAL"]["DFT"].get("CHARGE", [0])[0])
        self.composition = self.initial_structure.composition
        return self.initial_structure
    def ran_successfully(self):
        """
        Sanity checks that the program ran successfully. Looks at the bottom of the CP2K output file
        for the "PROGRAM ENDED" line, which is printed when successfully ran. Also grabs the number
        of warnings issued.

        Raises:
            ValueError: if the "PROGRAM ENDED" line is not found in the output.
        """
        program_ended_at = re.compile(r"PROGRAM ENDED AT\s+(\w+)")
        num_warnings = re.compile(r"The number of warnings for this run is : (\d+)")
        self.read_pattern(
            patterns={"completed": program_ended_at},
            reverse=True,
            terminate_on_match=True,
            postprocess=bool,
        )
        self.read_pattern(
            patterns={"num_warnings": num_warnings},
            reverse=True,
            terminate_on_match=True,
            postprocess=int,
        )
        if not self.completed:
            raise ValueError("The provided CP2K job did not finish running! Cannot parse the file reliably.")
def convergence(self):
"""
Check whether or not the SCF and geometry optimization cycles converged.
"""
# SCF Loops
uncoverged_inner_loop = re.compile(r"(Leaving inner SCF loop)")
scf_converged = re.compile(r"(SCF run converged)|(SCF run NOT converged)")
self.read_pattern(
patterns={
"uncoverged_inner_loop": uncoverged_inner_loop,
"scf_converged": scf_converged,
},
reverse=True,
terminate_on_match=False,
postprocess=bool,
)
for i, x in enumerate(self.data["scf_converged"]):
if x[0]:
self.data["scf_converged"][i] = True
else:
self.data["scf_converged"][i] = False
# GEO_OPT
geo_opt_not_converged = re.compile(r"(MAXIMUM NUMBER OF OPTIMIZATION STEPS REACHED)")
geo_opt_converged = re.compile(r"(GEOMETRY OPTIMIZATION COMPLETED)")
self.read_pattern(
patterns={
"geo_opt_converged": geo_opt_converged,
"geo_opt_not_converged": geo_opt_not_converged,
},
reverse=True,
terminate_on_match=True,
postprocess=bool,
)
if not all(self.data["scf_converged"]):
warnings.warn(
"There is at least one unconverged SCF cycle in the provided cp2k calculation",
UserWarning,
)
if any(self.data["geo_opt_not_converged"]):
warnings.warn("Geometry optimization did not converge", UserWarning)
    def parse_energies(self):
        """
        Get the total energy from the output file.

        Stores per-step energies (converted from Hartree to eV) in
        ``self.data["total_energy"]`` and the last value in ``self.final_energy``.
        """
        # NOTE(review): the "." before the decimals is unescaped (matches any
        # character); r"-?\d+\.\d+" would be stricter — confirm no mis-matches.
        toten_pattern = re.compile(r"Total FORCE_EVAL.*\s(-?\d+.\d+)")
        self.read_pattern(
            {"total_energy": toten_pattern},
            terminate_on_match=False,
            postprocess=float,
            reverse=False,
        )
        # Convert Hartree -> eV
        self.data["total_energy"] = np.multiply(self.data.get("total_energy", []), _hartree_to_ev_)
        self.final_energy = self.data.get("total_energy", [])[-1][-1]
    def parse_forces(self):
        """
        Get the forces from the output file.

        Prefers a dedicated *frc*.xyz file when exactly one is present;
        otherwise falls back to the ATOMIC FORCES table in the main output.
        """
        if len(self.filenames["forces"]) == 1:
            # One nested list of [fx, fy, fz] per atom, per ionic step
            self.data["forces"] = [
                [list(atom.coords) for atom in step]
                for step in XYZ.from_file(self.filenames["forces"][0]).all_molecules
            ]
        else:
            header_pattern = r"ATOMIC FORCES.+Z"
            row_pattern = r"\s+\d+\s+\d+\s+\w+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
            footer_pattern = r"SUM OF ATOMIC FORCES"
            self.data["forces"] = self.read_table_pattern(
                header_pattern=header_pattern,
                row_pattern=row_pattern,
                footer_pattern=footer_pattern,
                postprocess=_postprocessor,
                last_one_only=False,
            )
    def parse_stresses(self):
        """
        Get the stresses from the output file.

        Prefers a dedicated stress file when exactly one is present; otherwise
        parses the STRESS TENSOR table from the main output. Also records the
        trace of the stress tensor under ``self.data["stress"]``.
        """
        if len(self.filenames["stress"]) == 1:
            # Columns 2:11 of each row hold the flattened 3x3 stress tensor
            dat = np.loadtxt(self.filenames["stress"][0], skiprows=1)
            self.data["stress_tensor"] = [[list(d[2:5]), list(d[5:8]), list(d[8:11])] for d in dat]
        else:
            header_pattern = r"STRESS TENSOR.+Z"
            row_pattern = r"\s+\w+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
            footer_pattern = r"^$"  # table ends at the first blank line
            self.data["stress_tensor"] = self.read_table_pattern(
                header_pattern=header_pattern,
                row_pattern=row_pattern,
                footer_pattern=footer_pattern,
                postprocess=_postprocessor,
                last_one_only=False,
            )
        trace_pattern = re.compile(r"Trace\(stress tensor.+(-?\d+\.\d+E?-?\d+)")
        self.read_pattern(
            {"stress": trace_pattern},
            terminate_on_match=False,
            postprocess=float,
            reverse=False,
        )
    def parse_ionic_steps(self):
        """
        Collect energies, forces, stresses, and structures into
        ``self.ionic_steps`` — one dict per ionic step. Quantities that were
        not parsed for a step are simply omitted from that step's dict.

        NOTE(review): a missing "forces"/"stress_tensor" key in self.data
        raises KeyError (not caught below) — presumably parse_forces /
        parse_stresses always ran first; confirm.
        """
        self.ionic_steps = []
        # TODO: find a better workaround. Currently when optimization is done there
        # is an extra scf step before the optimization starts causing size difference
        if len(self.structures) + 1 == len(self.data["total_energy"]):
            self.data["total_energy"] = self.data["total_energy"][1:]
        for i in range(len(self.data["total_energy"])):
            self.ionic_steps.append({})
            try:
                self.ionic_steps[i]["E"] = self.data["total_energy"][i][0]
            except (TypeError, IndexError):
                warnings.warn("No total energies identified! Check output file")
            try:
                self.ionic_steps[i]["forces"] = self.data["forces"][i]
            except (TypeError, IndexError):
                # forces may not be available for this run
                pass
            try:
                self.ionic_steps[i]["stress_tensor"] = self.data["stress_tensor"][i][0]
            except (TypeError, IndexError):
                # stresses may not be available for this run
                pass
            try:
                self.ionic_steps[i]["structure"] = self.structures[i]
            except (TypeError, IndexError):
                warnings.warn("Structure corresponding to this ionic step was not found!")
    def parse_cp2k_params(self):
        """
        Parse the CP2K general parameters from CP2K output file into a dictionary.

        Records the code version ("cp2k_version") and the input file name
        ("input_filename") in ``self.data``.
        """
        # NOTE(review): the version pattern captures only a "d.d" form; longer
        # release strings (e.g. "2021.1") would be truncated — confirm.
        version = re.compile(r"\s+CP2K\|.+(\d\.\d)")
        input_file = re.compile(r"\s+CP2K\|\s+Input file name\s+(.+)$")
        self.read_pattern(
            {"cp2k_version": version, "input_filename": input_file},
            terminate_on_match=True,
            reverse=False,
            postprocess=_postprocessor,
        )
    def parse_input(self):
        """
        Load in the input set from the input file (if it can be found).

        Uses the input file name parsed by parse_cp2k_params, trying common
        compression extensions; warns when no candidate exists on disk.
        """
        if len(self.data["input_filename"]) == 0:
            return
        input_filename = self.data["input_filename"][0][0]
        # Try the plain name first, then compressed variants
        for ext in ["", ".gz", ".GZ", ".z", ".Z", ".bz2", ".BZ2"]:
            if os.path.exists(os.path.join(self.dir, input_filename + ext)):
                self.input = Cp2kInput.from_file(os.path.join(self.dir, input_filename + ext))
                return
        warnings.warn("Original input file not found. Some info may be lost.")
    def parse_global_params(self):
        """
        Parse the GLOBAL section parameters from CP2K output file into a dictionary.
        """
        pat = re.compile(r"\s+GLOBAL\|\s+([\w+\s]*)\s+(\w+)")
        self.read_pattern({"global": pat}, terminate_on_match=False, reverse=False)
        # Normalize the keys with the standard postprocessor; values stay strings
        for d in self.data["global"]:
            d[0], d[1] = _postprocessor(d[0]), str(d[1])
        self.data["global"] = dict(self.data["global"])
    def parse_dft_params(self):
        """
        Parse the DFT parameters (as well as functional, HF, vdW params).

        Populates self.data["dft"] with the generic DFT key/value lines and
        nests the cutoff, functional, HFX, and vdW information inside it.
        """
        # Generic "DFT| <key>   <value>" lines (value separated by >= 3 spaces)
        pat = re.compile(r"\s+DFT\|\s+(\w.*)\s\s\s(.*)$")
        self.read_pattern(
            {"dft": pat},
            terminate_on_match=False,
            postprocess=_postprocessor,
            reverse=False,
        )
        self.data["dft"] = dict(self.data["dft"])

        # Regroup the cutoff entries under a nested "cutoffs" dict.
        # "Cutoffs:_density" is presumably the key the postprocessor produces
        # for the "Cutoffs: density" line -- gradient/tau follow on their own
        # lines. TODO confirm against a real output file.
        self.data["dft"]["cutoffs"] = {}
        self.data["dft"]["cutoffs"]["density"] = self.data["dft"].pop("Cutoffs:_density", None)
        self.data["dft"]["cutoffs"]["gradient"] = self.data["dft"].pop("gradient", None)
        self.data["dft"]["cutoffs"]["tau"] = self.data["dft"].pop("tau", None)

        # Functional
        functional = re.compile(r"\s+FUNCTIONAL\|\s+(.+):")
        self.read_pattern(
            {"functional": functional},
            terminate_on_match=False,
            postprocess=_postprocessor,
            reverse=False,
        )
        # Flatten the list-of-lists of matches into a flat list of names
        self.data["dft"]["functional"] = [item for sublist in self.data.pop("functional", None) for item in sublist]

        # HF exchange info
        hfx = re.compile(r"\s+HFX_INFO\|\s+(.+):\s+(.*)$")
        self.read_pattern(
            {"hfx": hfx},
            terminate_on_match=False,
            postprocess=_postprocessor,
            reverse=False,
        )
        if len(self.data["hfx"]) > 0:
            self.data["dft"]["hfx"] = dict(self.data.pop("hfx"))

        # Van der waals correction (DFT-D2, DFT-D3, ...)
        vdw = re.compile(r"\s+vdW POTENTIAL\|\s+(DFT-D.)\s")
        self.read_pattern(
            {"vdw": vdw},
            terminate_on_match=False,
            postprocess=_postprocessor,
            reverse=False,
        )
        if len(self.data["vdw"]) > 0:
            # Only the first match is kept
            self.data["dft"]["vdw"] = self.data.pop("vdw")[0][0]
def parse_scf_params(self):
"""
Retrieve the most import SCF parameters: the max number of scf cycles (max_scf),
the convergence cutoff for scf (eps_scf),
:return:
"""
max_scf = re.compile(r"max_scf:\s+(\d+)")
eps_scf = re.compile(r"eps_scf:\s+(\d+)")
self.read_pattern(
{"max_scf": max_scf, "eps_scf": eps_scf},
terminate_on_match=True,
reverse=False,
)
self.data["scf"] = {}
self.data["scf"]["max_scf"] = self.data.pop("max_scf")[0][0] if self.data["max_scf"] else None
self.data["scf"]["eps_scf"] = self.data.pop("eps_scf")[0][0] if self.data["eps_scf"] else None
def parse_cell_params(self):
"""
Parse the lattice parameters (initial) from the output file
"""
cell_volume = re.compile(r"\s+CELL\|\sVolume.*\s(\d+\.\d+)")
vectors = re.compile(r"\s+CELL\| Vector.*\s(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)")
angles = re.compile(r"\s+CELL\| Angle.*\s(\d+\.\d+)")
self.read_pattern(
{"cell_volume": cell_volume, "lattice": vectors, "angles": angles},
terminate_on_match=False,
postprocess=float,
reverse=False,
)
i = iter(self.data["lattice"])
return list(zip(i, i, i))
def parse_atomic_kind_info(self):
"""
Parse info on what atomic kinds are present and what basis/pseudopotential is describing each of them.
"""
kinds = re.compile(r"Atomic kind: (\w+)")
orbital_basis_set = re.compile(r"Orbital Basis Set\s+(.+$)")
potential_information = re.compile(r"(?:Potential information for\s+(.+$))|(?:atomic kind are GHOST atoms)")
auxiliary_basis_set = re.compile(r"Auxiliary Fit Basis Set\s+(.+$)")
core_electrons = re.compile(r"Total number of core electrons\s+(\d+)")
valence_electrons = re.compile(r"Total number of valence electrons\s+(\d+)")
pseudo_energy = re.compile(r"Total Pseudopotential Energy.+(-?\d+.\d+)")
self.read_pattern(
{
"kinds": kinds,
"orbital_basis_set": orbital_basis_set,
"potential_info": potential_information,
"auxiliary_basis_set": auxiliary_basis_set,
"core_electrons": core_electrons,
"valence_electrons": valence_electrons,
"pseudo_energy": pseudo_energy,
},
terminate_on_match=True,
postprocess=str,
reverse=False,
)
atomic_kind_info = {}
for i, kind in enumerate(self.data["kinds"]):
atomic_kind_info[kind[0]] = {
"orbital_basis_set": self.data.get("orbital_basis_set")[i][0],
"pseudo_potential": self.data.get("potential_info")[i][0],
"kind_number": i + 1,
}
try:
atomic_kind_info[kind[0]]["valence_electrons"] = self.data.get("valence_electrons")[i][0]
except (TypeError, IndexError):
atomic_kind_info[kind[0]]["valence_electrons"] = None
try:
atomic_kind_info[kind[0]]["core_electrons"] = self.data.get("core_electrons")[i][0]
except (TypeError, IndexError):
atomic_kind_info[kind[0]]["core_electrons"] = None
try:
atomic_kind_info[kind[0]]["auxiliary_basis_set"] = self.data.get("auxiliary_basis_set")[i]
except (TypeError, IndexError):
atomic_kind_info[kind[0]]["auxiliary_basis_set"] = None
try:
atomic_kind_info[kind[0]]["total_pseudopotential_energy"] = (
self.data.get("total_pseudopotential_energy")[i][0] * _hartree_to_ev_
)
except (TypeError, IndexError):
atomic_kind_info[kind[0]]["total_pseudopotential_energy"] = None
self.data["atomic_kind_info"] = atomic_kind_info
def parse_total_numbers(self):
"""
Parse total numbers (not usually important)
"""
atomic_kinds = r"- Atomic kinds:\s+(\d+)"
atoms = r"- Atoms:\s+(\d+)"
shell_sets = r"- Shell sets:\s+(\d+)"
shells = r"- Shells:\s+(\d+)"
primitive_funcs = r"- Primitive Cartesian functions:\s+(\d+)"
cart_base_funcs = r"- Cartesian basis functions:\s+(\d+)"
spher_base_funcs = r"- Spherical basis functions:\s+(\d+)"
self.read_pattern(
{
"atomic_kinds": atomic_kinds,
"atoms": atoms,
"shell_sets": shell_sets,
"shells": shells,
"primitive_cartesian_functions": primitive_funcs,
"cartesian_basis_functions": cart_base_funcs,
"spherical_basis_functions": spher_base_funcs,
},
terminate_on_match=True,
)
    def parse_scf_opt(self):
        """
        Parse the SCF cycles (not usually important).

        Populates self.data["scf_time"], self.data["convergence"], and
        self.data["electronic_steps"], one inner list per SCF loop.
        """
        # Header of the SCF iteration table printed by CP2K
        header = r"Step\s+Update method\s+Time\s+Convergence\s+Total energy\s+Change" + r"\s+\-+"
        # One iteration row. The convergence column is optional (trailing "?")
        # because it is absent on the first SCF step.
        row = (
            r"(\d+)\s+(\S+\s?\S+)\s+(\d+\.\d+E\+\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)?"
            + r"\s+(-?\d+\.\d+)\s+(-?\d+\.\d+E[\+\-]?\d+)"
        )
        footer = r"^$"

        scfs = self.read_table_pattern(
            header_pattern=header,
            row_pattern=row,
            footer_pattern=footer,
            last_one_only=False,
        )

        self.data["electronic_steps"] = []
        self.data["convergence"] = []
        self.data["scf_time"] = []
        # Negative indices count from the end of each 7-group row:
        # j[-4] time, j[-3] convergence (may be "None" for the optional
        # group), j[-2] total energy -- presumed mapping, TODO confirm.
        for i in scfs:
            self.data["scf_time"].append([float(j[-4]) for j in i])
            self.data["convergence"].append([float(j[-3]) for j in i if j[-3] != "None"])
            self.data["electronic_steps"].append([float(j[-2]) for j in i])
    def parse_timing(self):
        """
        Parse the timing info (how long did the run take).

        Populates self.timing, keyed by subroutine name, from the last timing
        table in the output file.
        """
        header = (
            r"SUBROUTINE\s+CALLS\s+ASD\s+SELF TIME\s+TOTAL TIME" + r"\s+MAXIMUM\s+AVERAGE\s+MAXIMUM\s+AVERAGE\s+MAXIMUM"
        )
        row = r"(\w+)\s+(.+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)"
        footer = r"\-+"

        # Only the final timing table is retained (last_one_only=True)
        timing = self.read_table_pattern(
            header_pattern=header,
            row_pattern=row,
            footer_pattern=footer,
            last_one_only=True,
            postprocess=_postprocessor,
        )
        self.timing = {}
        # Column mapping inferred from the table header:
        # t = (name, calls, asd, self avg, self max, total avg, total max)
        # -- TODO confirm against an actual CP2K timing block
        for t in timing:
            self.timing[t[0]] = {
                "calls": {"max": t[1]},
                "asd": t[2],
                "self_time": {"average": t[3], "maximum": t[4]},
                "total_time": {"average": t[5], "maximum": t[6]},
            }
    def parse_opt_steps(self):
        """
        Parse the geometry optimization information.

        For each optimizer step, the floating-point summary terms and the
        boolean convergence flags are stored in self.data under their
        respective keys (one entry per step).
        """
        # "Informations at step =" Summary block (floating point terms)
        total_energy = re.compile(r"\s+Total Energy\s+=\s+(-?\d+.\d+)")
        real_energy_change = re.compile(r"\s+Real energy change\s+=\s+(-?\d+.\d+)")
        prediced_change_in_energy = re.compile(r"\s+Predicted change in energy\s+=\s+(-?\d+.\d+)")
        scaling_factor = re.compile(r"\s+Scaling factor\s+=\s+(-?\d+.\d+)")
        step_size = re.compile(r"\s+Step size\s+=\s+(-?\d+.\d+)")
        trust_radius = re.compile(r"\s+Trust radius\s+=\s+(-?\d+.\d+)")
        used_time = re.compile(r"\s+Used time\s+=\s+(-?\d+.\d+)")

        # For RUN_TYPE=CELL_OPT (absent for plain GEO_OPT runs)
        pressure_deviation = re.compile(r"\s+Pressure Deviation.*=\s+(-?\d+.\d+)")
        pressure_tolerance = re.compile(r"\s+Pressure Tolerance.*=\s+(-?\d+.\d+)")

        self.read_pattern(
            {
                "total_energy": total_energy,
                "real_energy_change": real_energy_change,
                "predicted_change_in_energy": prediced_change_in_energy,
                "scaling_factor": scaling_factor,
                "step_size": step_size,
                "trust_radius": trust_radius,
                "used_time": used_time,
                "pressure_deviation": pressure_deviation,
                "pressure_tolerance": pressure_tolerance,
            },
            terminate_on_match=False,
            postprocess=float,
        )

        # "Informations at step =" Summary block (bool terms, as YES/NO words)
        decrease_in_energy = re.compile(r"\s+Decrease in energy\s+=\s+(\w+)")
        converged_step_size = re.compile(r"\s+Convergence in step size\s+=\s+(\w+)")
        converged_rms_step = re.compile(r"\s+Convergence in RMS step\s+=\s+(\w+)")
        converged_in_grad = re.compile(r"\s+Conv\. in gradients\s+=\s+(\w+)")
        converged_in_rms_grad = re.compile(r"\s+Conv\. in RMS gradients\s+=\s+(\w+)")
        pressure_converged = re.compile(r"\s+Conv\. for PRESSURE\s+=\s+(\w+)")

        self.read_pattern(
            {
                "decrease_in_energy": decrease_in_energy,
                "converged_step_size": converged_step_size,
                "converged_rms_step": converged_rms_step,
                "converged_in_grad": converged_in_grad,
                "converged_in_rms_grad": converged_in_rms_grad,
                "pressure_converged": pressure_converged,
            },
            terminate_on_match=False,
            postprocess=_postprocessor,
        )
def parse_mulliken(self):
"""
Parse the mulliken population analysis info for each step
:return:
"""
header = r"Mulliken Population Analysis.+Net charge"
pattern = r"\s+(\d)\s+(\w+)\s+(\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
footer = r".+Total charge"
d = self.read_table_pattern(
header_pattern=header,
row_pattern=pattern,
footer_pattern=footer,
last_one_only=False,
)
if d:
print("Found data, but not yet implemented!")
    def parse_hirshfeld(self):
        """
        Parse the hirshfeld population analysis for each ionic step and attach
        the results as a site property on the corresponding structure.
        """
        uks = self.spin_polarized
        header = r"Hirshfeld Charges.+Net charge"
        footer = r"^$"

        if not uks:
            # Restricted run: one population column and one net-charge column
            pattern = r"\s+(\d)\s+(\w+)\s+(\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
            d = self.read_table_pattern(
                header_pattern=header,
                row_pattern=pattern,
                footer_pattern=footer,
                last_one_only=False,
            )
            for i, ionic_step in enumerate(d):
                population = []
                net_charge = []
                for site in ionic_step:
                    population.append(site[4])
                    net_charge.append(site[5])
                hirshfeld = [{"population": population[j], "net_charge": net_charge[j]} for j in range(len(population))]
                # NOTE(review): "hirshfield" is misspelled, but the key is kept
                # for backwards compatibility with existing consumers.
                self.structures[i].add_site_property("hirshfield", hirshfeld)
        else:
            # Spin-polarized run: two population columns (up/down) plus a
            # spin moment and net charge
            pattern = (
                r"\s+(\d)\s+(\w+)\s+(\d+)\s+(-?\d+\.\d+)\s+"
                + r"(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)"
            )
            d = self.read_table_pattern(
                header_pattern=header,
                row_pattern=pattern,
                footer_pattern=footer,
                last_one_only=False,
            )
            for i, ionic_step in enumerate(d):
                population = []
                net_charge = []
                spin_moment = []
                for site in ionic_step:
                    # NOTE(review): site[4:5] is a 1-tuple (spin-up population
                    # only); possibly intended to be site[4:6] to capture both
                    # spin channels -- confirm against upstream.
                    population.append(tuple(site[4:5]))
                    spin_moment.append(site[6])
                    net_charge.append(site[7])
                hirshfeld = [
                    {
                        "population": population[j],
                        "net_charge": net_charge[j],
                        "spin_moment": spin_moment[j],
                    }
                    for j in range(len(population))
                ]
                self.structures[i].add_site_property("hirshfield", hirshfeld)
    def parse_mo_eigenvalues(self):
        """
        Parse the MO eigenvalues from the cp2k output file. Will get the eigenvalues (and band gap)
        at each ionic step (if more than one exist).

        Everything is decomposed by spin channel. If calculation was performed without spin polarization,
        then only Spin.up will be present, which represents the average of up and down.
        """
        eigenvalues = []
        band_gap = []
        efermi = []

        with zopen(self.filename, "rt") as f:
            lines = iter(f.readlines())
            for line in lines:
                try:
                    # Occupied eigenvalues, first spin channel. Each new block
                    # starts a fresh per-ionic-step entry.
                    if line.__contains__(" occupied subspace spin"):
                        eigenvalues.append(
                            {
                                "occupied": {Spin.up: [], Spin.down: []},
                                "unoccupied": {Spin.up: [], Spin.down: []},
                            }
                        )
                        efermi.append({Spin.up: None, Spin.down: None})
                        next(lines)
                        # Values are eV-converted until the "Fermi" line
                        # terminates the block
                        while True:
                            line = next(lines)
                            if line.__contains__("Fermi"):
                                efermi[-1][Spin.up] = float(line.split()[-1])
                                break
                            eigenvalues[-1]["occupied"][Spin.up].extend(
                                [_hartree_to_ev_ * float(l) for l in line.split()]
                            )
                        next(lines)
                        line = next(lines)
                        # Second occupied block (spin 2) for spin-polarized runs
                        if line.__contains__(" occupied subspace spin"):
                            next(lines)
                            while True:
                                line = next(lines)
                                if line.__contains__("Fermi"):
                                    efermi[-1][Spin.down] = float(line.split()[-1])
                                    break
                                eigenvalues[-1]["occupied"][Spin.down].extend(
                                    [_hartree_to_ev_ * float(l) for l in line.split()]
                                )
                    # Unoccupied eigenvalues, first spin channel
                    if line.__contains__(" unoccupied subspace spin"):
                        next(lines)
                        line = next(lines)
                        while True:
                            # Non-converged eigenvalue solves print a warning
                            # block that must be skipped over
                            if line.__contains__("WARNING : did not converge"):
                                warnings.warn(
                                    "Convergence of eigenvalues for " "unoccupied subspace spin 1 did NOT converge"
                                )
                                next(lines)
                                next(lines)
                                next(lines)
                                line = next(lines)
                                eigenvalues[-1]["unoccupied"][Spin.up].extend(
                                    [_hartree_to_ev_ * float(l) for l in line.split()]
                                )
                                next(lines)
                                line = next(lines)
                                break
                            line = next(lines)
                            if "Eigenvalues" in line or "HOMO" in line:
                                break
                            eigenvalues[-1]["unoccupied"][Spin.up].extend(
                                [_hartree_to_ev_ * float(l) for l in line.split()]
                            )
                        # Second unoccupied block (spin 2) for spin-polarized runs
                        if line.__contains__(" unoccupied subspace spin"):
                            next(lines)
                            line = next(lines)
                            while True:
                                if line.__contains__("WARNING : did not converge"):
                                    warnings.warn(
                                        "Convergence of eigenvalues for " "unoccupied subspace spin 2 did NOT converge"
                                    )
                                    next(lines)
                                    next(lines)
                                    next(lines)
                                    line = next(lines)
                                    eigenvalues[-1]["unoccupied"][Spin.down].extend(
                                        [_hartree_to_ev_ * float(l) for l in line.split()]
                                    )
                                    break
                                line = next(lines)
                                if line.__contains__("HOMO"):
                                    next(lines)
                                    break
                                try:
                                    eigenvalues[-1]["unoccupied"][Spin.down].extend(
                                        [_hartree_to_ev_ * float(l) for l in line.split()]
                                    )
                                except AttributeError:
                                    break
                except ValueError:
                    # A non-numeric token inside a value block: give up on
                    # eigenvalues entirely for this file
                    eigenvalues = [
                        {
                            "occupied": {Spin.up: None, Spin.down: None},
                            "unoccupied": {Spin.up: None, Spin.down: None},
                        }
                    ]
                    warnings.warn("Convergence of eigenvalues for one or more subspaces did NOT converge")

        self.data["eigenvalues"] = eigenvalues
        self.data["band_gap"] = band_gap

        if len(eigenvalues) == 0:
            warnings.warn("No MO eigenvalues detected.")
            return

        # self.data will always contained the eigenvalues resolved by spin channel. The average vbm, cbm, gap,
        # and fermi are saved as class attributes, as there is (usually) no assymmetry in these values for
        # common materials
        if self.spin_polarized:
            self.data["vbm"] = {
                Spin.up: np.max(eigenvalues[-1]["occupied"][Spin.up]),
                Spin.down: np.max(eigenvalues[-1]["occupied"][Spin.down]),
            }
            self.data["cbm"] = {
                Spin.up: np.min(eigenvalues[-1]["unoccupied"][Spin.up]),
                Spin.down: np.min(eigenvalues[-1]["unoccupied"][Spin.down]),
            }
            self.vbm = (self.data["vbm"][Spin.up] + self.data["vbm"][Spin.down]) / 2
            self.cbm = (self.data["cbm"][Spin.up] + self.data["cbm"][Spin.down]) / 2
            self.efermi = (efermi[-1][Spin.up] + efermi[-1][Spin.down]) / 2
        else:
            self.data["vbm"] = {
                Spin.up: np.max(eigenvalues[-1]["occupied"][Spin.up]),
                Spin.down: None,
            }
            self.data["cbm"] = {
                Spin.up: np.min(eigenvalues[-1]["unoccupied"][Spin.up]),
                Spin.down: None,
            }
            self.vbm = self.data["vbm"][Spin.up]
            self.cbm = self.data["cbm"][Spin.up]
            self.efermi = efermi[-1][Spin.up]
def parse_homo_lumo(self):
"""
Find the HOMO - LUMO gap in [eV]. Returns the last value. For gaps/eigenvalues decomposed by
spin up/spin down channel and over many ionic steps, see parse_mo_eigenvalues()
"""
pattern = re.compile(r"HOMO.*-.*LUMO.*gap.*\s(-?\d+.\d+)")
self.read_pattern(
patterns={"band_gap": pattern},
reverse=True,
terminate_on_match=False,
postprocess=float,
)
bg = {Spin.up: [], Spin.down: []}
for i in range(len(self.data["band_gap"])):
if self.spin_polarized:
if i % 2:
bg[Spin.up].append(self.data["band_gap"][i][0])
else:
bg[Spin.down].append(self.data["band_gap"][i][0])
else:
bg[Spin.up].append(self.data["band_gap"][i][0])
bg[Spin.down].append(self.data["band_gap"][i][0])
self.data["band_gap"] = bg
self.band_gap = (bg[Spin.up][-1] + bg[Spin.down][-1]) / 2 if bg[Spin.up] and bg[Spin.down] else None
    def parse_dos(self, pdos_files=None, ldos_files=None, sigma=0):
        """
        Parse the pdos_ALPHA files created by cp2k, and assimilate them into a CompleteDos object.
        Either provide a list of PDOS file paths, or use glob to find the .pdos_ALPHA extension in
        the calculation directory.

        Args:
            pdos_files (list): list of pdos file paths, otherwise they will be inferred
            ldos_Files (list): list of ldos file paths, otherwise they will be inferred
            sigma (float): Gaussian smearing parameter, if desired. Because cp2k is generally
                used as a gamma-point only code, this is often needed to get smooth DOS that
                are comparable to k-point averaged DOS
        """
        if pdos_files is None:
            pdos_files = self.filenames["PDOS"]

        if ldos_files is None:
            ldos_files = self.filenames["LDOS"]

        # Parse specie projected dos
        tdos, pdoss, ldoss = None, {}, {}
        for pdos_file in pdos_files:
            _pdos, _tdos = parse_dos(pdos_file, total=True, sigma=sigma)
            # Merge densities per species/orbital so spin channels from the
            # ALPHA and BETA files end up in the same Dos objects
            for k in _pdos:
                if k in pdoss:
                    for orbital in _pdos[k]:
                        pdoss[k][orbital].densities.update(_pdos[k][orbital].densities)
                else:
                    pdoss.update(_pdos)
            if not tdos:
                tdos = _tdos
            else:
                # NOTE(review): all([...]) wraps a single comparison, so it is
                # equivalent to the bare keys() comparison -- different spin
                # keys are merged, identical keys are summed.
                if not all([_tdos.densities.keys() == tdos.densities.keys()]):
                    tdos.densities.update(_tdos.densities)
                else:
                    tdos.densities = add_densities(density1=_tdos.densities, density2=tdos.densities)

        # parse any site-projected dos
        for ldos_file in ldos_files:
            _pdos = parse_dos(ldos_file, sigma=sigma)
            for k in _pdos:
                if k in ldoss:
                    for orbital in _pdos[k]:
                        ldoss[k][orbital].densities.update(_pdos[k][orbital].densities)
                else:
                    ldoss.update(_pdos)

        self.data["pdos"] = jsanitize(pdoss, strict=True)
        self.data["ldos"] = jsanitize(ldoss, strict=True)
        self.data["tdos"] = jsanitize(tdos, strict=True)

        # If number of site-projected dos == number of sites, assume they are bijective
        # and create the CompleteDos object
        _ldoss = {}
        if len(ldoss) == len(self.initial_structure):
            for k in self.data["ldos"]:
                # ldos keys are 1-based site indices (as strings)
                _ldoss[self.initial_structure[int(k) - 1]] = self.data["ldos"][k]
            self.data["cdos"] = CompleteDos(self.final_structure, total_dos=tdos, pdoss=_ldoss)
@staticmethod
def _gauss_smear(densities, energies, npts, width):
if not width:
return densities
"""Return a gaussian smeared DOS"""
d = np.zeros(npts)
e_s = np.linspace(min(energies), max(energies), npts)
for e, _pd in zip(energies, densities):
weight = np.exp(-(((e_s - e) / width) ** 2)) / (np.sqrt(np.pi) * width)
d += _pd * weight
return d
def read_pattern(self, patterns, reverse=False, terminate_on_match=False, postprocess=str):
r"""
This function originally comes from pymatgen.io.vasp.outputs Outcar class
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(
self.filename,
patterns,
reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess,
)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(
self,
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
attribute_name=None,
last_one_only=True,
):
r"""
This function originally comes from pymatgen.io.vasp.outputs Outcar class
Parse table-like data. A table composes of three parts: header,
main body, footer. All the data matches "row pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern matches the
table header. This pattern should match all the text
immediately before the main body of the table. For multiple
sections table match the text until the section of
interest. MULTILINE and DOTALL options are enforced, as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
attribute_name (str): Name of this table. If present the parsed data
will be attached to "data. e.g. self.data["efg"] = [...]
last_one_only (bool): All the tables will be parsed, if this option
is set to True, only the last table will be returned. The
enclosing list will be removed. i.e. Only a single table will
be returned. Default to be True.
Returns:
List of tables. 1) A table is a list of rows. 2) A row if either a list of
attribute values in case the the capturing group is defined without name in
row_pattern, or a dict in case that named capturing groups are defined by
row_pattern.
"""
with zopen(self.filename, "rt") as f:
text = f.read()
table_pattern_text = header_pattern + r"\s*^(?P<table_body>(?:\s+" + row_pattern + r")+)\s+" + footer_pattern
table_pattern = re.compile(table_pattern_text, re.MULTILINE | re.DOTALL)
rp = re.compile(row_pattern)
tables = []
for mt in table_pattern.finditer(text):
table_body_text = mt.group("table_body")
table_contents = []
for line in table_body_text.split("\n"):
ml = rp.search(line)
d = ml.groupdict()
if len(d) > 0:
processed_line = {k: postprocess(v) for k, v in d.items()}
else:
processed_line = [postprocess(v) for v in ml.groups()]
table_contents.append(processed_line)
tables.append(table_contents)
if last_one_only:
retained_data = tables[-1]
else:
retained_data = tables
if attribute_name is not None:
self.data[attribute_name] = retained_data
return retained_data
    def as_dict(self):
        """
        Return dictionary representation of the output, split into "input"
        (parameters/structure fed to CP2K) and "output" (results) sections,
        plus a few top-level summary fields.
        """
        d = {"input": {}, "output": {}}
        # Wall time from the "CP2K" row of the parsed timing table
        d["total_time"] = self.timing["CP2K"]["total_time"]["maximum"]
        d["run_type"] = self.run_type
        d["input"]["global"] = self.data.get("global")
        d["input"]["dft"] = self.data.get("dft", None)
        d["input"]["scf"] = self.data.get("scf", None)
        d["input"]["structure"] = self.initial_structure.as_dict()
        d["input"]["atomic_kind_info"] = self.data.get("atomic_kind_info", None)
        d["input"]["cp2k_input"] = self.input
        d["ran_successfully"] = self.completed
        d["cp2k_version"] = self.cp2k_version
        d["output"]["structure"] = self.final_structure.as_dict()
        d["output"]["ionic_steps"] = self.ionic_steps
        d["composition"] = self.composition.as_dict()
        d["output"]["energy"] = self.final_energy
        d["output"]["energy_per_atom"] = self.final_energy / self.composition.num_atoms
        d["output"]["bandgap"] = self.band_gap
        d["output"]["cbm"] = self.cbm
        d["output"]["vbm"] = self.vbm
        d["output"]["efermi"] = self.efermi
        d["output"]["is_metal"] = self.is_metal
        return d
def parse_energy_file(energy_file):
    """
    Parses energy file for calculations with multiple ionic steps.

    Args:
        energy_file (str): path to a CP2K *.ener style file.

    Returns:
        dict: column name -> array of per-step values, with the energy
        columns converted from Hartree to eV.
    """
    columns = [
        "step",
        "kinetic_energy",
        "temp",
        "potential_energy",
        "conserved_quantity",
        "used_time",
    ]
    df = pd.read_table(energy_file, skiprows=1, names=columns, sep=r"\s+")
    df["kinetic_energy"] = df["kinetic_energy"] * _hartree_to_ev_
    df["potential_energy"] = df["potential_energy"] * _hartree_to_ev_
    df["conserved_quantity"] = df["conserved_quantity"] * _hartree_to_ev_
    # astype returns a new frame -- the previous code discarded the result,
    # making the conversion a no-op.
    df = df.astype(float)
    d = {c: df[c].values for c in columns}
    return d
def parse_dos(dos_file=None, spin_channel=None, total=False, sigma=0):
    """
    Parse a single DOS file created by cp2k. Must contain one PDOS snapshot. i.e. you cannot
    use this cannot deal with multiple concatenated dos files.

    Args:
        dos_file (list): list of pdos_ALPHA file paths
        spin_channel (int): Which spin channel the file corresponds to. By default, CP2K will
            write the file with ALPHA or BETA in the filename (for spin up or down), but
            you can specify this here, in case you have a manual file name.
            spin_channel == 1 --> spin up, spin_channel == -1 --> spin down.
        total (bool): Whether to grab the total occupations, or the orbital decomposed ones.
        sigma (float): width for gaussian smearing, if desired

    Returns:
        Everything necessary to create a dos object, in dict format:
        (1) orbital decomposed DOS dict:
            i.e. pdoss = {specie: {orbital.s: {Spin.up: ... }, orbital.px: {Spin.up: ... } ...}}
        (2) energy levels of this dos file
        (3) fermi energy (in eV).
        DOS object is not created here
    """
    if spin_channel:
        spin = Spin(spin_channel)
    else:
        # CP2K marks spin-down files with "BETA" in the filename
        spin = Spin.down if os.path.split(dos_file)[-1].__contains__("BETA") else Spin.up

    with zopen(dos_file, "rt") as f:
        lines = f.readlines()
        # Element kind (pdos) or site index (ldos) from the header line
        kind = re.search(r"atomic kind\s(.*)\sat iter", lines[0]) or re.search(r"list\s(\d+)\s(.*)\sat iter", lines[0])
        kind = kind.groups()[0]

        efermi = float(lines[0].split()[-2]) * _hartree_to_ev_
        header = re.split(r"\s{2,}", lines[1].replace("#", "").strip())[2:]
        dat = np.loadtxt(dos_file)

        # CP2K orbital label -> pymatgen Orbital attribute name.
        # NOTE(review): bare "p"/"d"/"f" and "d-2" intentionally collapse onto
        # a single representative orbital (px/dxy/f_3) -- confirm this is the
        # desired mapping.
        _label_map = {
            "p": "px",
            "d": "dxy",
            "f": "f_3",
            "d-2": "dxy",
            "d-1": "dyz",
            "d0": "dz2",
            "d+1": "dxz",
            "d+2": "dx2",
            "f-3": "f_3",
            "f-2": "f_2",
            "f-1": "f_1",
            "f0": "f0",
            "f+1": "f1",
            "f+2": "f2",
            "f+3": "f3",
        }

        def cp2k_to_pmg_labels(x):
            """Translate a single CP2K orbital label to the pymatgen name."""
            return _label_map.get(x, x)

        header = [cp2k_to_pmg_labels(h) for h in header]

        data = dat[:, 1:]
        # Convert the energy column from Hartree to eV exactly once.
        # (Previously the column was scaled in place AND scaled again when
        # assigning `energies`, double-converting the energies.)
        data[:, 0] *= _hartree_to_ev_
        energies = data[:, 0]
        if sigma:
            # gauss_smear returns only the density columns; re-attach the
            # energy column so the column indexing below is identical for the
            # smeared and unsmeared cases (previously the smeared array was
            # indexed with an off-by-one column shift).
            data = np.column_stack((energies, gauss_smear(data, sigma)))
        # Columns: 0 = energy, 1 = occupation, 2.. = orbital-projected DOS
        pdos = {
            kind: {
                getattr(Orbital, h): Dos(efermi=efermi, energies=energies, densities={spin: data[:, i + 2]})
                for i, h in enumerate(header)
            }
        }
        if total:
            tdos = Dos(
                efermi=efermi,
                energies=energies,
                densities={spin: np.sum(data[:, 2:], axis=1)},
            )
            return pdos, tdos
        return pdos
def gauss_smear(data, width):
    """
    Return a Gaussian-smeared DOS.

    Args:
        data: 2D array whose first column holds the energies and whose
            remaining columns hold densities.
        width (float): Gaussian width; a falsy width returns data unchanged.

    Returns:
        Array of shape (npts, n_density_columns) with each density column
        smeared onto a uniform grid spanning the energy range.
    """
    if not width:
        return data

    npts, norb = data.shape
    energy_grid = np.linspace(np.min(data[:, 0]), np.max(data[:, 0]), npts)
    # grid[i, j] == energy_grid[i]; subtracting the raw energies broadcasts
    # across rows, giving kernel[i, j] = G(energy_grid[i] - energy[j])
    grid = np.multiply(np.ones((npts, npts)), energy_grid).T
    kernel = np.exp(-((np.subtract(grid, data[:, 0]) / width) ** 2)) / (np.sqrt(np.pi) * width)

    smeared = [np.sum(np.multiply(kernel, data[:, col]), axis=1) for col in range(1, norb)]
    return np.array(smeared).T
| mit |
jprchlik/aia_mkmovie | aia_select_cutout.py | 1 | 30686 | import matplotlib
#Use TkAgg backend for plotting
matplotlib.use('TkAgg',warn=False,force=True)
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
#implement the deault mpl key bindings
from matplotlib.backend_bases import key_press_handler,MouseEvent
import numpy as np
import sys
import matplotlib.pyplot as plt
from datetime import datetime
try:
import sunpy.map
from sunpy.cm import cm
except ImportError:
sys.stdout.write("sunpy not installed, use pip install sunpy --upgrade")
#check the python version to use one Tkinter syntax or another
if sys.version_info[0] < 3:
import Tkinter as Tk
import tkMessageBox as box
import tkFileDialog as Tkf
else:
import tkinter as Tk
from tkinter import messagebox as box
from tkinter import filedialog as Tkf
#main gui class
class gui_c(Tk.Frame):
#init gui class
def __init__(self,parent,flist,w0=1900,h0=1144,cy=0,cx=0,color3=False,img_scale=None):
"""
Shows AIA image in 3 color or single image for scaling and region selection. After you
choose a region, the class will pass the parameters onto aia_mkimage from aia_mkmovie.
Parameters
----------
parent : Tk.Frame
A Tk.Frame instance.
flist : list
A list of files. If the list has 3 dimensions then the GUI will create a 3 color
image.
w0: int or float, optional
The pixel width of the output to pass to aia_mkmovie (Can be set in the GUI).
If the height (h0) is larger than
w0 the program will switch the two parameters on output.
However, it will also transpose the x and y axes, which allows
for rotated images and movies. Default = 1900
h0: int or float, optional
The pixel height of the output to pass to aia_mkmovie (can be set in GUI).
If h0 is larger than the
width (w0) the program will switch the two parameters on
output. However, it will also transpose the x and y axes,
which allows for rotated images and movies. Default = 1144
cx : float or int, optional
Center of the field of view for creating images. If cx is set
then the image is assumed to be a cutout. Selecting in prompt
overrides cx. Default = 0.0 (can be set in GUI).
cy : float or int, optional
Center of the field of view for creating images. If cy is set
then the image is assumed to be a cutout. Selecting in prompt
overrides cy. Default = 0.0 (can be set in GUI).
color3 : boolean, optional
Create a 3 color image. If color3 set to True panel must be
False and the wavelength list must be 4 wavelengths long.
The wav list has the following format [R, G, B]. Default =
False.
img_scale: dictionary, optional
Pass a dictionary where the key is a 4 character wavelength string with left padded 0s
in Angstroms and the values are a list. The first element in the list is a color map.
By default the first element contains the color map given by sunpy for a given wavelength
(e.g. for 131 the color map is cm.sdoaia131). The second and third element are respectively
the minimum and maximum color map values. The minimum and maximum assume a arcsinh
transformation and exposure normalized values. The program uses arcsinh for all image
scaling because the arcsinh function behaves like a log transformation at large
values but does not error at negative values. If the user gives no image scale
then a default image scale loads. The default color table works well for single
and panel images but not for 3 color images. Will pass updated values updated
in GUI to aia_mkimage through aia_mkmovie.
"""
Tk.Frame.__init__(self,parent,background='white') #create initial frame with white background
#set the starting list to be 0
self.order = 0
#get maximum value in list
self.ordermax = len(flist)-1
#create parent variable
self.parent = parent
#check image height
if isinstance(h0,(int,float)):
self.h0 = h0
else:
sys.stdout.write('h0 must be an integer or float (Assuming 0)')
self.h0 = 0.0
#check image width
if isinstance(w0,(int,float)):
self.w0 = w0
else:
sys.stdout.write('w0 must be an integer or float (Assuming 0)')
self.w0 = 0.0
#check image x center
if isinstance(cx,(int,float)):
self.cx = cx
else:
sys.stdout.write('cx must be an integer or float (Assuming 0)')
self.cx = 0.0
#check image y center
if isinstance(cy,(int,float)):
self.cy = cy
else:
sys.stdout.write('cy must be an integer or float (Assuming 0)')
self.cy = 0.0
#3 color images
if isinstance(color3,bool):
self.color3 = color3
else:
sys.stdout.write('color3 must be a boolean')
#file list
if isinstance(flist,str):
self.flist = [flist]
elif isinstance(flist,list):
self.flist = flist
#correct for single value list in color3
if ((self.color3) & (len(flist) == 3)): self.flist = [self.flist]
else:
sys.stdout.write('flist must be a string or list')
#clicked point on the figure
self.clicked = False
#first clicked point on the figure
self.firstclick = False
#Dictionary for vmax, vmin, and color
if img_scale is None:
self.img_scale = {'0094':[cm.sdoaia94 ,np.arcsinh(1.),np.arcsinh(150.)],
'0131':[cm.sdoaia131 ,np.arcsinh(1.),np.arcsinh(500.)],
'0171':[cm.sdoaia171 ,np.arcsinh(10.),np.arcsinh(2500.)],
'0193':[cm.sdoaia193 ,np.arcsinh(10.),np.arcsinh(4500.)],
'0211':[cm.sdoaia211 ,np.arcsinh(10.),np.arcsinh(4000.)],
'0304':[cm.sdoaia304 ,np.arcsinh(2.),np.arcsinh(300.)],
'0335':[cm.sdoaia335 ,np.arcsinh(1.),np.arcsinh(100.)],
'1600':[cm.sdoaia1600,np.arcsinh(20.),np.arcsinh(500.)],
'1700':[cm.sdoaia1700,np.arcsinh(200.),np.arcsinh(4000.)]}
elif isinstance(img_scale,dict):
self.img_scale = img_scale
else:
sys.stdout.write('img_scale must be a dictionary with color map, min value, max value')
#Start the creation of the window and GUI
self.centerWindow()
self.FigureWindow()
self.initUI()
self.aia_set()
self.aia_plot()
#Create area and window for figure
    def FigureWindow(self):
        """Build the matplotlib figure, Tk canvas, toolbar, and the
        increase/decrease file buttons inside the frame."""
        #set the information based on screen size
        x = self.parent.winfo_screenwidth()
        y = self.parent.winfo_screenheight()
        aiaframe = Tk.Frame(self)
        # screen aspect ratio, used to size the figure proportionally
        aratio = float(x)/float(y)
        #Create the figure
        self.f,self.a = plt.subplots(ncols=2,figsize=(8*aratio,8*aratio*.5))
        #Separate the two plotting windows (left: main image, right: zoom)
        self.x = self.a[1]
        self.a = self.a[0]
        #turn off clicked axis for starters
        self.x.set_axis_off()
        #Create window for the plot
        self.canvas = FigureCanvasTkAgg(self.f,master=self)
        #Draw the plot
        self.canvas.draw()
        #Turn on matplotlib widgets
        self.canvas.get_tk_widget().pack(side=Tk.TOP,fill=Tk.BOTH,expand=1)
        #Display matplotlib widgets
        # NOTE(review): NavigationToolbar2TkAgg was removed in matplotlib 2.2+
        # (replaced by NavigationToolbar2Tk) -- confirm the pinned mpl version.
        self.toolbar = NavigationToolbar2TkAgg(self.canvas,self)
        self.toolbar.update()
        self.canvas._tkcanvas.pack(side=Tk.TOP,fill=Tk.BOTH,expand=1)
        #Connect mpl to mouse clicking
        self.f.canvas.mpl_connect('button_press_event',self.on_click_event)
        #Connect mpl to mouse clicking
        #self.f.canvas.mpl_connect('key_press_event',self.on_key_event)

        #create button to go up an order
        upbutton = Tk.Button(master=aiaframe,text='Increase File',command=self.increaseorder)
        upbutton.pack(side=Tk.LEFT)
        #create button to go down an order
        downbutton = Tk.Button(master=aiaframe,text='Decrease File',command=self.decreaseorder)
        downbutton.pack(side=Tk.LEFT)
        aiaframe.pack(side=Tk.TOP)
#Create window in center of screen
def centerWindow(self):
w = 2000
h = 1200
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw-w)/2
y = (sh-h)/2
self.parent.geometry('%dx%d+%d+%d' % (w,h,x,y))
    #Initialize the GUI
    def initUI(self):
        """Lay out all control widgets: quit button, parameter Entry boxes,
        the order selector, the per-channel colour-range boxes, and the menu.

        Every Entry is bound to <Return> so edits take effect via aia_param
        (or on_order_box for the order field).
        """
        #set up the title
        self.parent.title("Select AIA Region")
        #create frame for plotting
        frame = Tk.Frame(self,relief=Tk.RAISED,borderwidth=1)
        frame.pack(fill=Tk.BOTH,expand=1)
        self.pack(fill=Tk.BOTH,expand=1)
        #set up okay and quit buttons
        quitButton = Tk.Button(self,text="Quit",command=self.onExit)
        quitButton.pack(side=Tk.RIGHT,padx=5,pady=5)
        #set up center width box
        w0Text = Tk.StringVar()
        w0Text.set("Width (pixels)")
        w0Dir = Tk.Label(self,textvariable=w0Text,height=4)
        w0Dir.pack(side=Tk.LEFT)
        #Add so width can be updated
        w0 = Tk.StringVar()
        w0.set('{0:5.2f}'.format(self.w0))
        self.w0val = Tk.Entry(self,textvariable=w0,width=10)
        self.w0val.bind("<Return>",self.aia_param)
        self.w0val.pack(side=Tk.LEFT,padx=5,pady=5)
        #set up center h0 box
        h0Text = Tk.StringVar()
        h0Text.set("Height (pixels)")
        h0Dir = Tk.Label(self,textvariable=h0Text,height=4)
        h0Dir.pack(side=Tk.LEFT)
        #Add so center height can be updated
        h0 = Tk.StringVar()
        h0.set('{0:5.2f}'.format(self.h0))
        self.h0val = Tk.Entry(self,textvariable=h0,width=10)
        self.h0val.bind("<Return>",self.aia_param)
        self.h0val.pack(side=Tk.LEFT,padx=5,pady=5)
        #set up center x0 box
        cxText = Tk.StringVar()
        cxText.set("X0 (arcsec)")
        cxDir = Tk.Label(self,textvariable=cxText,height=4)
        cxDir.pack(side=Tk.LEFT)
        #Add so center x can be updated
        self.scx = Tk.StringVar()
        self.scx.set('{0:5.2f}'.format(self.cx))
        self.cxval = Tk.Entry(self,textvariable=self.scx,width=10)
        self.cxval.bind("<Return>",self.aia_param)
        self.cxval.pack(side=Tk.LEFT,padx=5,pady=5)
        #set up center y box
        cyText = Tk.StringVar()
        cyText.set("Y0 (arcsec)")
        cyDir = Tk.Label(self,textvariable=cyText,height=4)
        cyDir.pack(side=Tk.LEFT)
        #Add so center y can be updated
        self.scy = Tk.StringVar()
        self.scy.set('{0:5.2f}'.format(self.cy))
        self.cyval = Tk.Entry(self,textvariable=self.scy,width=10)
        self.cyval.bind("<Return>",self.aia_param)
        self.cyval.pack(side=Tk.LEFT,padx=5,pady=5)
        #set up order number
        orderText = Tk.StringVar()
        orderText.set("Order")
        orderDir = Tk.Label(self,textvariable=orderText,height=4)
        orderDir.pack(side=Tk.LEFT)
        #Add so order number can be updated
        self.sorder = Tk.StringVar()
        self.sorder.set(str(int(self.order)))
        self.orderval = Tk.Entry(self,textvariable=self.sorder,width=5)
        self.orderval.bind("<Return>",self.on_order_box)
        self.orderval.pack(side=Tk.LEFT,padx=5,pady=5)
        #boxes to create if 3 color image
        if self.color3:
            ###############################################
            #              BLUE COLOR BOXES               #
            ###############################################
            #Add so Color Min can be updated
            self.bcmin = Tk.StringVar()
            self.bcmin.set('{0:5.2f}'.format(0))
            self.bcminval = Tk.Entry(self,textvariable=self.bcmin,width=10)
            self.bcminval.bind("<Return>",self.aia_param)
            self.bcminval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Min
            bcminText = Tk.StringVar()
            bcminText.set("B Color Min.")
            bcminDir = Tk.Label(self,textvariable=bcminText,height=4)
            bcminDir.pack(side=Tk.RIGHT)
            #Add so Color Max can be updated
            self.bcmax = Tk.StringVar()
            self.bcmax.set('{0:5.2f}'.format(0))
            self.bcmaxval = Tk.Entry(self,textvariable=self.bcmax,width=10)
            self.bcmaxval.bind("<Return>",self.aia_param)
            self.bcmaxval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Max
            bcmaxText = Tk.StringVar()
            bcmaxText.set("B Color Max.")
            bcmaxDir = Tk.Label(self,textvariable=bcmaxText,height=4)
            bcmaxDir.pack(side=Tk.RIGHT)
            ###############################################
            #             GREEN COLOR BOXES               #
            ###############################################
            #Add so Color Min can be updated
            self.gcmin = Tk.StringVar()
            self.gcmin.set('{0:5.2f}'.format(0))
            self.gcminval = Tk.Entry(self,textvariable=self.gcmin,width=10)
            self.gcminval.bind("<Return>",self.aia_param)
            self.gcminval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Min
            gcminText = Tk.StringVar()
            gcminText.set("G Color Min.")
            gcminDir = Tk.Label(self,textvariable=gcminText,height=4)
            gcminDir.pack(side=Tk.RIGHT)
            #Add so Color Max can be updated
            self.gcmax = Tk.StringVar()
            self.gcmax.set('{0:5.2f}'.format(0))
            self.gcmaxval = Tk.Entry(self,textvariable=self.gcmax,width=10)
            self.gcmaxval.bind("<Return>",self.aia_param)
            self.gcmaxval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Max
            gcmaxText = Tk.StringVar()
            gcmaxText.set("G Color Max.")
            gcmaxDir = Tk.Label(self,textvariable=gcmaxText,height=4)
            gcmaxDir.pack(side=Tk.RIGHT)
            ###############################################
            #               RED COLOR BOXES               #
            ###############################################
            #Add so Color Min can be updated
            self.rcmin = Tk.StringVar()
            self.rcmin.set('{0:5.2f}'.format(0))
            self.rcminval = Tk.Entry(self,textvariable=self.rcmin,width=10)
            self.rcminval.bind("<Return>",self.aia_param)
            self.rcminval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Min
            rcminText = Tk.StringVar()
            rcminText.set("R Color Min.")
            rcminDir = Tk.Label(self,textvariable=rcminText,height=4)
            rcminDir.pack(side=Tk.RIGHT)
            #Add so Color Max can be updated
            self.rcmax = Tk.StringVar()
            self.rcmax.set('{0:5.2f}'.format(0))
            self.rcmaxval = Tk.Entry(self,textvariable=self.rcmax,width=10)
            self.rcmaxval.bind("<Return>",self.aia_param)
            self.rcmaxval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Max
            rcmaxText = Tk.StringVar()
            rcmaxText.set("R Color Max.")
            rcmaxDir = Tk.Label(self,textvariable=rcmaxText,height=4)
            rcmaxDir.pack(side=Tk.RIGHT)
        #boxes to create if single wavelength image
        else:
            #Add so Color Min can be updated
            self.cmin = Tk.StringVar()
            self.cmin.set('{0:5.2f}'.format(0))
            self.cminval = Tk.Entry(self,textvariable=self.cmin,width=10)
            self.cminval.bind("<Return>",self.aia_param)
            self.cminval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Min
            cminText = Tk.StringVar()
            cminText.set("Color Min.")
            cminDir = Tk.Label(self,textvariable=cminText,height=4)
            cminDir.pack(side=Tk.RIGHT)
            #Add so Color Max can be updated
            self.cmax = Tk.StringVar()
            self.cmax.set('{0:5.2f}'.format(0))
            self.cmaxval = Tk.Entry(self,textvariable=self.cmax,width=10)
            self.cmaxval.bind("<Return>",self.aia_param)
            self.cmaxval.pack(side=Tk.RIGHT,padx=5,pady=5)
            #set up Color Max
            cmaxText = Tk.StringVar()
            cmaxText.set("Color Max.")
            cmaxDir = Tk.Label(self,textvariable=cmaxText,height=4)
            cmaxDir.pack(side=Tk.RIGHT)
        #set up Submenu
        menubar = Tk.Menu(self.parent)
        self.parent.config(menu=menubar)
        fileMenu = Tk.Menu(menubar)
        subMenu = Tk.Menu(fileMenu)
        #create another item in menu
        fileMenu.add_separator()
        fileMenu.add_command(label='Exit',underline=0,command=self.onExit)
        # NOTE(review): fileMenu is never attached to menubar via add_cascade,
        # so this Exit entry is unreachable from the menu bar -- confirm intent.
    #set AIA parameters
    def aia_param(self,event):
        """Read every parameter Entry box, update scaling, and replot.

        Bound to <Return> on the width/height/center/colour Entry widgets.
        Any non-numeric field raises ValueError, which pops error dialog 10.
        """
        #release cursor from entry box and back to the figure
        #needs to be done otherwise key strokes will not work
        self.f.canvas._tkcanvas.focus_set()
        try:
            # geometry of the selection box and its centre (arcsec)
            self.h0 = float(self.h0val.get())
            self.w0 = float(self.w0val.get())
            self.cx = float(self.cxval.get())
            self.cy = float(self.cyval.get())
            #update the color parameters
            #color table update if 3 color image
            if self.color3:
                #Update R color scale
                self.rmin = float(self.rcminval.get())
                self.rmax = float(self.rcmaxval.get())
                self.img_scale[self.wav[0]][1] = self.rmin
                self.img_scale[self.wav[0]][2] = self.rmax
                #Update G color scale
                self.gmin = float(self.gcminval.get())
                self.gmax = float(self.gcmaxval.get())
                self.img_scale[self.wav[1]][1] = self.gmin
                self.img_scale[self.wav[1]][2] = self.gmax
                #Update B color scale
                self.bmin = float(self.bcminval.get())
                self.bmax = float(self.bcmaxval.get())
                self.img_scale[self.wav[2]][1] = self.bmin
                self.img_scale[self.wav[2]][2] = self.bmax
                #recreate 3 color image with the new scaling
                self.create_3color()
            #color table if single image
            else:
                self.ivmin = float(self.cminval.get())
                self.ivmax = float(self.cmaxval.get())
                self.img_scale[self.wav][1] = self.ivmin
                self.img_scale[self.wav][2] = self.ivmax
            #now replot: temporarily mark as clicked so the zoom panel updates
            self.clicked = True
            self.sub_window()
            self.aia_plot()
            self.clicked = False
        #error if not floats
        except ValueError:
            self.error = 10
            self.onError()
    #Exits the program
    def onExit(self):
        """Tear down matplotlib state, stop the Tk mainloop, destroy the root."""
        # close the figure first so matplotlib releases its Tk resources
        plt.clf()
        plt.close()
        self.quit()
        self.parent.destroy()
#Command to increase the order to plot new aia image
def increaseorder(self):
self.order = self.order+1
if self.order > self.ordermax:
self.order = 0
self.sorder.set(str(int(self.order)))
self.clicked = True
self.a.clear()
self.aia_set()
self.aia_plot()
self.clicked = False
#Command to decrease order to plot new aia image
def decreaseorder(self):
self.order = self.order-1
if self.order < 0:
self.order = self.ordermax
self.sorder.set(str(int(self.order)))
self.clicked = True
self.a.clear()
self.aia_set()
self.aia_plot()
self.clicked = False
    #create 3 color image
    def create_3color(self):
        """Fill self.img3d (H x W x 3) from the three loaded AIA maps.

        Channel order follows self.img: index 0 -> R, 1 -> G, 2 -> B.
        Each channel is arcsinh-compressed, exposure-normalized, scaled by
        the per-wavelength (vmin, vmax) pair, and clipped to [0, 1].
        """
        self.wav = []
        for j,i in enumerate(self.img):
            # wavelength key zero padded to four characters (' 171' -> '0171')
            self.wav.append('{0:4.0f}'.format(i.wavelength.value).replace(' ','0'))
            #set normalized scaling for every observation
            self.ivmin = self.img_scale[self.wav[j]][1]
            self.ivmax = self.img_scale[self.wav[j]][2]
            # NOTE(review): dividing by ivmax rather than (ivmax - ivmin)
            # is not a conventional min-max normalization -- confirm intent.
            prelim = (np.arcsinh(i.data/i.exposure_time.value)-self.ivmin)/self.ivmax
            #replace out of bounds points
            prelim[prelim < 0.] = 0.
            prelim[prelim > 1.] = 1.
            self.img3d[:,:,j] = prelim
            #set the string value in the plot window
            if j == 0:
                self.rcmin.set('{0:9.3}'.format(self.ivmin))
                self.rcmax.set('{0:9.3}'.format(self.ivmax))
            if j == 1:
                self.gcmin.set('{0:9.3}'.format(self.ivmin))
                self.gcmax.set('{0:9.3}'.format(self.ivmax))
            if j == 2:
                self.bcmin.set('{0:9.3}'.format(self.ivmin))
                self.bcmax.set('{0:9.3}'.format(self.ivmax))
        #Retrieve and set the time value based on the R (first) image
        self.obs_time = self.img[0].date
    # get the image properties
    def img_prop(self):
        """Apply colormap, display range, and observation time for the image."""
        #different plotting properties if color3 set
        if self.color3:
            # three-colour mode: per-channel scaling and obs_time are
            # handled inside create_3color (self.img is a list of maps here)
            self.create_3color()
        else:
            # wavelength key zero padded to four characters (' 171' -> '0171')
            self.wav ='{0:4.0f}'.format(self.img.wavelength.value).replace(' ','0')
            #use default color tables
            self.icmap = self.img_scale[self.wav][0]
            self.ivmin = self.img_scale[self.wav][1]
            self.ivmax = self.img_scale[self.wav][2]
            #set the string value in the plot window
            self.cmin.set('{0:9.3}'.format(self.ivmin))
            self.cmax.set('{0:9.3}'.format(self.ivmax))
            #Retrieve and set the time value
            self.obs_time = self.img.date
def text_loc(self):
#set text location
#if self.w0 > self.h0:
# self.txtx = -(self.w0-self.h0)
# self.txty = (self.maxy-self.miny)*0.01
#elif self.w0 < self.h0:
# self.txty = -(self.h0-self.w0)
# self.txtx = (self.maxx-self.minx)*0.01
#if self.w0 == self.h0:
self.txtx = (self.maxx-self.minx)*0.01
self.txty = (self.maxy-self.miny)*0.01
    #plot the current AIA image
    def aia_plot(self):
        """Redraw the main image panel and, after a click, the zoom panel.

        The zoom panel (self.x) is only populated once a point has been
        clicked (self.firstclick) and a redraw is pending (self.clicked).
        """
        #clear the current image
        self.a.clear()
        #find where to put the plotting information
        self.text_loc()
        #Make 3 color plot
        if self.color3:
            self.a.imshow(self.img3d,interpolation='none',origin='lower',extent=[self.minx,self.maxx,self.miny,self.maxy])
            #annotate with the three wavelengths and the observation time
            self.a.text(self.minx+self.txtx,self.miny+self.txty,'AIA {0}/{1}/{2}'.format(*self.wav)+'- {0}Z'.format(self.obs_time.strftime('%Y/%m/%d - %H:%M:%S')),color='white',fontsize=14,zorder=50,fontweight='bold')
        else:
            self.a.imshow(self.data0,interpolation='none',cmap=self.icmap,origin='lower',vmin=self.ivmin,vmax=self.ivmax,extent=[self.minx,self.maxx,self.miny,self.maxy])
            #show current date
            self.a.text(self.minx+self.txtx,self.miny+self.txty,'{0}Z'.format(self.obs_time.strftime('%Y/%m/%d - %H:%M:%S')),color='white',fontsize=14,zorder=50,fontweight='bold')
        self.a.set_xlabel('Arcseconds')
        self.a.set_ylabel('Arcseconds')
        if ((self.clicked) & (self.firstclick)):
            #make sure axis is on if not turn it on
            if not self.x.get_frame_on(): self.x.set_axis_on()
            #Show the clicked region in a separate plot
            self.x.clear()
            #3 color image
            if self.color3:
                self.x.imshow(self.img3d,interpolation='none',origin='lower',extent=[self.minx,self.maxx,self.miny,self.maxy])
            else:
                self.x.imshow(self.data0,interpolation='none',cmap=self.icmap,origin='lower',vmin=self.ivmin,vmax=self.ivmax,extent=[self.minx,self.maxx,self.miny,self.maxy])
            # restrict the zoom panel to the selection box computed in sub_window
            self.x.set_xlim([min(self.xbox),max(self.xbox)])
            self.x.set_ylim([min(self.ybox),max(self.ybox)])
            self.x.scatter(self.cx,self.cy,marker='x',color='red',s=35,zorder=499)
            self.x.set_xlabel('Arcseconds')
            self.x.set_ylabel('Arcseconds')
            #show the selected region on the big plot (black under white dashes)
            self.a.scatter(self.cx,self.cy,marker='x',color='red',s=35,zorder=499)
            self.a.plot(self.xbox,self.ybox,color='black',linewidth=5,zorder=500)
            self.a.plot(self.xbox,self.ybox,'--',color='white',linewidth=3,zorder=501)
        self.canvas.draw()
#set variables spectrum of a given order
def aia_set(self):
#set current index depending on 3color image
if self.color3:
self.infile = self.flist[self.order]
#else single file
else:
self.infile = self.flist[self.order]
#put file into sunpy map
self.img = sunpy.map.Map(self.infile)
self.maxx,self.minx,self.maxy,self.miny = self.img_extent() #get extent of image for coverting pixel into physical
#3 color image
if self.color3:
self.img3d = np.zeros((self.img[0].data.shape[0],self.img[0].data.shape[1],3))
self.scale = [self.img[0].scale[0].value,self.img[0].scale[1].value] # get x, y image scale
#single color image
else:
self.data0 = np.arcsinh(self.img.data/self.img.exposure_time.value) #reference the data plot seperately
self.scale = [self.img.scale[0].value,self.img.scale[1].value] # get x, y image scale
#set aia plotting preferences
self.img_prop()
def img_extent(self):
#get only physical values for first image if color 3
if self.color3:
# get the image coordinates in pixels
px0 = self.img[0].meta['crpix1']
py0 = self.img[0].meta['crpix2']
# get the image coordinates in arcsec
ax0 = self.img[0].meta['crval1']
ay0 = self.img[0].meta['crval2']
# get the image scale in arcsec
axd = self.img[0].meta['cdelt1']
ayd = self.img[0].meta['cdelt2']
#get the number of pixels
tx,ty = self.img[0].data.shape
else:
# get the image coordinates in pixels
px0 = self.img.meta['crpix1']
py0 = self.img.meta['crpix2']
# get the image coordinates in arcsec
ax0 = self.img.meta['crval1']
ay0 = self.img.meta['crval2']
# get the image scale in arcsec
axd = self.img.meta['cdelt1']
ayd = self.img.meta['cdelt2']
#get the number of pixels
tx,ty = self.img.data.shape
#get the max and min x and y values
pminx,pmaxx = 0.-px0,tx-px0
pminy,pmaxy = 0.-py0,ty-py0
#convert to arcsec
maxx,minx = ax0+pmaxx*axd,ax0+pminx*axd
maxy,miny = ay0+pmaxy*ayd,ay0+pminy*ayd
return maxx,minx,maxy,miny
    #Basic click event
    def on_click_event(self,click):
        """Handle a mouse click on the figure: recentre the selection box.

        Clicks outside the axes deliver xdata/ydata of None; the subtraction
        below then raises TypeError, which is caught to show error dialog 20.
        """
        #Click events for continuum selection
        #Make sure you click inside the plot
        try:
            #test to make sure the data are in the plot
            #(None - 0. raises TypeError for clicks outside the axes)
            test = click.xdata-0.
            test = click.ydata-0.
            #store the physical value of clicked points
            self.cx = click.xdata
            self.cy = click.ydata
            #tell if the plot has been clicked at least once
            self.firstclick = True
            #tell the plot its been clicked
            self.clicked = True
            #create subwindow of selected region
            self.sub_window()
            #update the x and y parameters in bottom box
            self.scx.set('{0:5.2f}'.format(self.cx))
            self.scy.set('{0:5.2f}'.format(self.cy))
            #plot new cutout box
            self.aia_plot()
            #reset to no click
            self.clicked = False
        #Throw error if clicked outside the plot
        except TypeError:
            self.error = 20
            self.onError()
#create window for plotting
def sub_window(self):
self.xbox = [self.cx-(self.scale[0]*self.w0/2.),self.cx-(self.scale[0]*self.w0/2.),self.cx+(self.scale[0]*self.w0/2.),self.cx+(self.scale[0]*self.w0/2.),self.cx-(self.scale[0]*self.w0/2.)]
self.ybox = [self.cy-(self.scale[1]*self.h0/2.),self.cy+(self.scale[1]*self.h0/2.),self.cy+(self.scale[1]*self.h0/2.),self.cy-(self.scale[1]*self.h0/2.),self.cy-(self.scale[1]*self.h0/2.)]
    #Function for retrieving order from popup
    def on_order(self):
        """Prompt for a file index via a modal popup until a valid one is given.

        Relies on the module-level `root` Tk window and the `order_popup`
        dialog class (defined elsewhere in this file). Re-prompts on every
        invalid entry, so the loop terminates only on valid input.

        NOTE(review): index 0 is rejected here (order > 0) although the
        Increase/Decrease buttons can wrap the index to 0 -- confirm intent.
        """
        m = 0
        while m == 0:
            try:
                inputO = order_popup(root)
                # block until the popup window is dismissed
                root.wait_window(inputO.top)
                order = int(inputO.order)
                if ((order > 0) & (order <= self.ordermax)):
                    m = 1
                else:
                    #Error order is out of range
                    self.error = 3
                    self.onError()
            except ValueError:
                #Error order is not an integer
                self.error = 4
                self.onError()
        return order
#Function for retrieving order from text box
def on_order_box(self,event):
#release cursor from entry box and back to the figure
#needs to be done otherwise key strokes will not work
self.f.canvas._tkcanvas.focus_set()
m = 0
while m == 0:
try:
order = self.orderval.get()
order = int(order)
if ((order > 0) & (order <= self.ordermax)):
m = 1
self.order = order
self.aia_set()
self.aia_plot()
else:
#Error order is out of range
self.error = 3
self.onError()
except ValueError:
#Error order is not an integer
self.error = 4
self.onError()
#Tells Why Order information is incorrect
def onError(self):
if self.error == 1:
box.showerror("Error","File Not Found")
if self.error == 4:
box.showerror("Error","Value Must be an Integer")
if self.error == 6:
box.showerror("Error","File is not in Fits Format")
if self.error == 10:
box.showerror("Error","Value Must be Float")
if self.error == 20:
box.showerror("Error","Must Select Inside Plot Bounds")
#main loop
def main():
    """Create the Tk root window, build the GUI and enter the event loop."""
    # root is global because on_order's popup needs the toplevel window
    global root
    root = Tk.Tk()
    app = gui_c(root)
    root.mainloop()

if __name__=="__main__":
    #create root frame and start the GUI
    main()
| mit |
emon10005/scikit-image | skimage/feature/tests/test_util.py | 35 | 2818 | import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from numpy.testing import assert_equal, assert_raises
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_prepare_grayscale_input_2D,
_mask_border_keypoints, plot_matches)
def test_feature_detector():
    # the abstract base class must refuse to run detection
    detector = FeatureDetector()
    assert_raises(NotImplementedError, detector.detect, None)
def test_descriptor_extractor():
    # the abstract base class must refuse to run extraction
    extractor = DescriptorExtractor()
    assert_raises(NotImplementedError, extractor.extract, None, None)
def test_prepare_grayscale_input_2D():
    # shapes that cannot be squeezed to 2-D must be rejected
    for bad_shape in ((3, 3, 3), (3, 1), (3, 1, 1)):
        assert_raises(ValueError, _prepare_grayscale_input_2D,
                      np.zeros(bad_shape))
    # genuinely 2-D inputs (possibly with a singleton axis) are accepted
    for ok_shape in ((3, 3), (3, 3, 1), (1, 3, 3)):
        img = _prepare_grayscale_input_2D(np.zeros(ok_shape))
def test_mask_border_keypoints():
    """Keypoints within `dist` of the image border must be masked out."""
    keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
    cases = [
        ((10, 10), 0, [1, 1, 1, 1, 1]),
        ((10, 10), 2, [0, 0, 1, 1, 1]),
        ((4, 4), 2, [0, 0, 1, 0, 0]),
        ((10, 10), 5, [0, 0, 0, 0, 0]),
        ((10, 10), 4, [0, 0, 0, 0, 1]),
    ]
    for shape, dist, expected in cases:
        assert_equal(_mask_border_keypoints(shape, keypoints, dist), expected)
@np.testing.decorators.skipif(plt is None)
def test_plot_matches():
    """Smoke-test plot_matches across image-shape combinations and options."""
    fig, ax = plt.subplots(nrows=1, ncols=1)

    shapes = (((10, 10), (10, 10)),
              ((10, 10), (12, 10)),
              ((10, 10), (10, 12)),
              ((10, 10), (12, 12)),
              ((12, 10), (10, 10)),
              ((10, 12), (10, 10)),
              ((12, 12), (10, 10)))

    keypoints1 = 10 * np.random.rand(10, 2)
    keypoints2 = 10 * np.random.rand(10, 2)
    matches = np.column_stack((np.random.randint(10, size=10),
                               np.random.randint(10, size=10)))

    # every call only needs to run without raising
    variants = ({}, {'only_matches': True},
                {'keypoints_color': 'r'}, {'matches_color': 'r'})
    for shape1, shape2 in shapes:
        img1 = np.zeros(shape1)
        img2 = np.zeros(shape2)
        for kwargs in variants:
            plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
                         **kwargs)
if __name__ == '__main__':
    # allow running this test module directly via numpy's nose-based runner
    from numpy import testing
    testing.run_module_suite()
| bsd-3-clause |
pianomania/scikit-learn | sklearn/tests/test_common.py | 39 | 6031 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier)
def test_all_estimator_no_base_class():
    """all_estimators must never expose abstract base classes."""
    for name, _ in all_estimators():
        assert_false(
            name.lower().startswith('base'),
            msg="Base estimators such as {0} should not be included"
                " in all_estimators".format(name))
def test_all_estimators():
    # Test that estimators are default-constructible, cloneable
    # and have working repr. (nose-style generator test: each yielded
    # tuple is run as an individual test case)
    estimators = all_estimators(include_meta_estimators=True)

    # Meta sanity-check to make sure that the estimator introspection runs
    # properly
    assert_greater(len(estimators), 0)

    for name, Estimator in estimators:
        # some can just not be sensibly default constructed
        yield (_named_check(check_parameters_default_constructible, name),
               name, Estimator)
def test_non_meta_estimators():
    """Yield the full battery of input-validation checks per plain estimator."""
    for name, Estimator in all_estimators():
        # private helpers and bicluster estimators are exercised elsewhere
        if name.startswith("_") or issubclass(Estimator, BiclusterMixin):
            continue
        for check in _yield_all_checks(name, Estimator):
            yield _named_check(check, name), name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in the scikit
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    # installed (non-source) layouts have no setup.py; nothing to smoke test
    if not os.path.exists(setup_filename):
        return
    try:
        # run setup.py from its own directory with a fake argv
        os.chdir(setup_path)
        old_argv = sys.argv
        sys.argv = ['setup.py', 'config']
        clean_warning_registry()
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            # execfile only exists on Python 2, hence the branch
            if PY3:
                with open('setup.py') as f:
                    exec(f.read(), dict(__name__='__main__'))
            else:
                execfile('setup.py', dict(__name__='__main__'))
    finally:
        # always restore argv and the working directory
        sys.argv = old_argv
        os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
    """Check class_weight='balanced' on every linear classifier (generator test)."""
    classifiers = all_estimators(type_filter='classifier')

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        # instantiating clazz() may warn; keep only linear classifiers
        # that actually accept a class_weight parameter
        linear_classifiers = [
            (name, clazz)
            for name, clazz in classifiers
            if ('class_weight' in clazz().get_params().keys() and
                issubclass(clazz, LinearClassifierMixin))]

    for name, Classifier in linear_classifiers:
        yield _named_check(check_class_weight_balanced_linear_classifier,
                           name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
    """Every name listed in a module's __all__ must resolve in that module."""
    pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                 onerror=lambda _: None)
    modnames = [modname for _, modname, _ in pkgs]
    modnames.append('sklearn')
    for modname in modnames:
        # test modules are exempt
        if ".tests." in modname:
            continue
        package = __import__(modname, fromlist="dummy")
        for name in getattr(package, '__all__', ()):
            if getattr(package, name, None) is None:
                raise AttributeError(
                    "Module '{0}' has no attribute '{1}'".format(
                        modname, name))
def test_root_import_all_completeness():
    """Each public top-level subpackage must be listed in sklearn.__all__."""
    EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
    for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
                                               onerror=lambda _: None):
        # skip nested modules, private modules, and the known exceptions
        not_public = '.' in modname or modname.startswith('_')
        if not_public or modname in EXCEPTIONS:
            continue
        assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
    # Ensure that for each contentful subpackage, there is a test directory
    # within it that is also a subpackage (i.e. a directory with __init__.py)

    # verbose-mode regex: externals, tests themselves, and private modules
    # are exempt from the requirement
    HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
                                      \.externals(\.|$)|
                                      \.tests(\.|$)|
                                      \._
                                      ''')
    # map every sklearn module name to whether it is a package
    lookup = dict((name, ispkg)
                  for _, name, ispkg
                  in pkgutil.walk_packages(sklearn.__path__,
                                           prefix='sklearn.'))
    missing_tests = [name for name, ispkg in lookup.items()
                     if ispkg
                     and not HAS_TESTS_EXCEPTIONS.search(name)
                     and name + '.tests' not in lookup]
    assert_equal(missing_tests, [],
                 '{0} do not have `tests` subpackages. Perhaps they require '
                 '__init__.py or an add_subpackage directive in the parent '
                 'setup.py'.format(missing_tests))
| bsd-3-clause |
albertaparicio/tfg-voice-conversion | seq2seq_pytorch_main.py | 1 | 38363 | # -*- coding: utf-8 -*-
# TODO Add argparser
"""
Translation with a Sequence to Sequence Network and Attention
*************************************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
In this project we will be teaching a neural network to translate from
French to English.
::
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .
> vous etes trop maigre .
= you re too skinny .
< you re all alone .
... to varying degrees of success.
This is made possible by the simple but powerful idea of the `sequence
to sequence network <http://arxiv.org/abs/1409.3215>`__, in which two
recurrent neural networks work together to transform one sequence to
another. An encoder network condenses an input sequence into a vector,
and a decoder network unfolds that vector into a new sequence.
.. figure:: /_static/img/seq-seq-images/seq2seq.png
:alt:
To improve upon this model we'll use an `attention
mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- http://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in
general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about Sequence to Sequence networks and
how they work:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
:doc:`/intermediate/char_rnn_classification_tutorial`
and :doc:`/intermediate/char_rnn_generation_tutorial`
helpful as those concepts are very similar to the Encoder and Decoder
models, respectively.
And for more, read the papers that introduced these topics:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
**Requirements**
"""
from __future__ import division, print_function, unicode_literals
import argparse
import glob
import gzip
import os
import random
from sys import version_info
import h5py
import numpy as np
import torch
import torch.nn as nn
from ahoproc_tools import error_metrics
from tfglib.seq2seq_normalize import mask_data
from tfglib.utils import init_logger
from torch import optim
from torch.autograd import Variable
from seq2seq_dataloader import DataLoader
from seq2seq_pytorch_model import AttnDecoderRNN, EncoderRNN
# run on GPU when one is available
use_cuda = torch.cuda.is_available()

# Conditional imports: cPickle is the Python 2 spelling of pickle
if version_info.major > 2:
    import pickle
else:
    import cPickle as pickle

# populated in the __main__ block below before main() runs
logger, opts = None, None
if __name__ == '__main__':
    # Parse the command line, set up logging, and persist the configuration
    # before main() runs.
    # logger.debug('Before parsing args')
    parser = argparse.ArgumentParser(
        description="Convert voice signal with seq2seq model")
    # dataset locations and serialization targets
    parser.add_argument('--train_data_path', type=str,
                        default="tcstar_data_trim/training/")
    parser.add_argument('--train_out_file', type=str,
                        default="tcstar_data_trim/seq2seq_train_datatable")
    parser.add_argument('--test_data_path', type=str,
                        default="tcstar_data_trim/test/")
    parser.add_argument('--test_out_file', type=str,
                        default="tcstar_data_trim/seq2seq_test_datatable")
    parser.add_argument('--val_fraction', type=float, default=0.25)
    parser.add_argument('--save-h5', dest='save_h5', action='store_true',
                        help='Save dataset to .h5 file')
    # model geometry
    parser.add_argument('--max_seq_length', type=int, default=500)
    parser.add_argument('--params_len', type=int, default=44)
    # parser.add_argument('--patience', type=int, default=4,
    #                     help="Patience epochs to do validation, if validation "
    #                          "score is worse than train for patience epochs "
    #                          ", quit training. (Def: 4).")
    # parser.add_argument('--enc_rnn_layers', type=int, default=1)
    # parser.add_argument('--dec_rnn_layers', type=int, default=1)
    parser.add_argument('--hidden_size', type=int, default=256)
    # parser.add_argument('--cell_type', type=str, default="lstm")
    # optimization hyperparameters
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--epoch', type=int, default=50)
    parser.add_argument('--learning_rate', type=float, default=0.0005)
    # parser.add_argument('--dropout', type=float, default=0)
    parser.add_argument('--teacher_forcing_ratio', type=float, default=1)
    parser.add_argument('--SOS_token', type=int, default=0)
    # parser.add_argument('--optimizer', type=str, default="adam")
    # parser.add_argument('--clip_norm', type=float, default=5)
    # parser.add_argument('--attn_length', type=int, default=500)
    # parser.add_argument('--attn_size', type=int, default=256)
    # parser.add_argument('--save_every', type=int, default=100)
    # run-mode switches and output locations
    parser.add_argument('--no-train', dest='do_train',
                        action='store_false', help='Flag to train or not.')
    parser.add_argument('--no-test', dest='do_test',
                        action='store_false', help='Flag to test or not.')
    parser.add_argument('--save_path', type=str, default="training_results")
    parser.add_argument('--pred_path', type=str, default="torch_predicted")
    # parser.add_argument('--tb_path', type=str, default="")
    parser.add_argument('--log', type=str, default="INFO")
    parser.add_argument('--load_model', dest='load_model', action='store_true',
                        help='Load previous model before training')
    parser.add_argument('--server', dest='server', action='store_true',
                        help='Commands to be run or not run if we are running '
                             'on server')
    parser.set_defaults(do_train=True, do_test=True, save_h5=False,
                        server=False)  # ,
    # load_model=False)
    opts = parser.parse_args()

    # Initialize logger
    logger_level = opts.log
    logger = init_logger(name=__name__, level=opts.log)

    logger.debug('Parsed arguments')

    if not os.path.exists(os.path.join(opts.save_path, 'torch_train')):
        os.makedirs(os.path.join(opts.save_path, 'torch_train'))
    # save config so a run can be reproduced later
    with gzip.open(os.path.join(opts.save_path, 'torch_train', 'config.pkl.gz'),
                   'wb') as cf:
        pickle.dump(opts, cf)
def main(args):
    """Entry point: optionally train, then optionally test the seq2seq model.

    Args:
        args: parsed argparse namespace (``opts``); ``do_train``, ``do_test``
            and ``load_model`` select which phases run.

    NOTE(review): if ``do_test`` is set while both ``do_train`` and
    ``load_model`` are off, the ``else`` branch below hits a NameError
    (``trained_encoder`` was never bound) — confirm callers always pass
    one of the two.
    """
    logger.debug('Main')
    # If-else for training and testing
    if args.do_train:
        dl = DataLoader(args, logger_level=args.log,
                        max_seq_length=args.max_seq_length)
        # Fresh encoder/attention-decoder pair sized by the feature vector.
        encoder1 = EncoderRNN(args.params_len, args.hidden_size, args.batch_size)
        attn_decoder1 = AttnDecoderRNN(args.hidden_size, args.params_len,
                                       batch_size=args.batch_size, n_layers=1,
                                       max_length=args.max_seq_length,
                                       dropout_p=0.1)
        if use_cuda:
            encoder1 = encoder1.cuda()
            attn_decoder1 = attn_decoder1.cuda()
        trained_encoder, trained_decoder = train_epochs(dl, encoder1, attn_decoder1)
    if args.do_test:
        # TODO What do we do for testing?
        # pass
        dl = DataLoader(args, logger_level=args.log, test=True,
                        max_seq_length=args.max_seq_length)
        if args.load_model:
            # Fresh modules; ``test`` loads the saved state dicts into them.
            encoder = EncoderRNN(args.params_len, args.hidden_size, args.batch_size)
            decoder = AttnDecoderRNN(args.hidden_size, args.params_len,
                                     batch_size=args.batch_size, n_layers=1,
                                     max_length=args.max_seq_length,
                                     dropout_p=0.1)
            if use_cuda:
                encoder = encoder.cuda()
                decoder = decoder.cuda()
        else:
            # Reuse the modules trained above in this same run.
            encoder = trained_encoder
            decoder = trained_decoder
        test(encoder, decoder, dl)
######################################################################
# Loading data files
# ==================
#
# The data for this project is a set of many thousands of English to
# French translation pairs.
#
# `This question on Open Data Stack
# Exchange <http://opendata.stackexchange.com/questions/3888/dataset-of
# -sentences-translated-into-many-languages>`__
# pointed me to the open translation site http://tatoeba.org/ which has
# downloads available at http://tatoeba.org/eng/downloads - and better
# yet, someone did the extra work of splitting language pairs into
# individual text files here: http://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so
# download to ``data/eng-fra.txt`` before continuing. The file is a tab
# separated list of translation pairs:
#
# ::
#
# I am cold. Je suis froid.
#
# .. Note::
# Download the data from
# `here <https://download.pytorch.org/tutorial/data.zip>`_
# and extract it to the current directory.
######################################################################
# Similar to the character encoding used in the character-level RNN
# tutorials, we will be representing each word in a language as a one-hot
# vector, or giant vector of zeros except for a single one (at the index
# of the word). Compared to the dozens of characters that might exist in a
# language, there are many many more words, so the encoding vector is much
# larger. We will however cheat a bit and trim the data to only use a few
# thousand words per language.
#
# .. figure:: /_static/img/seq-seq-images/word-encoding.png
# :alt:
#
#
######################################################################
# We'll need a unique index per word to use as the inputs and targets of
# the networks later. To keep track of all this we will use a helper class
# called ``Lang`` which has word → index (``word2index``) and index → word
# (``index2word``) dictionaries, as well as a count of each word
# ``word2count`` to use to later replace rare words.
#
# EOS_token = 1
#
#
# class Lang:
# def __init__(self, name):
# self.name = name
# self.word2index = {}
# self.word2count = {}
# self.index2word = {0: "SOS", 1: "EOS"}
# self.n_words = 2 # Count SOS and EOS
#
# def add_sentence(self, sentence):
# for word in sentence.split(' '):
# self.add_word(word)
#
# def add_word(self, word):
# if word not in self.word2index:
# self.word2index[word] = self.n_words
# self.word2count[word] = 1
# self.index2word[self.n_words] = word
# self.n_words += 1
# else:
# self.word2count[word] += 1
#
#
# ######################################################################
# # The files are all in Unicode, to simplify we will turn Unicode
# # characters to ASCII, make everything lowercase, and trim most
# # punctuation.
# #
#
# # Turn a Unicode string to plain ASCII, thanks to
# # http://stackoverflow.com/a/518232/2809427
# def unicode_to_ascii(s):
# return ''.join(
# c for c in unicodedata.normalize('NFD', s)
# if unicodedata.category(c) != 'Mn'
# )
#
#
# # Lowercase, trim, and remove non-letter characters
# def normalize_string(s):
# s = unicode_to_ascii(s.lower().strip())
# s = re.sub(r"([.!?])", r" \1", s)
# s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
# return s
#
#
# ######################################################################
# # To read the data file we will split the file into lines, and then split
# # lines into pairs. The files are all English → Other Language, so if we
# # want to translate from Other Language → English I added the ``reverse``
# # flag to reverse the pairs.
# #
#
# def read_langs(lang1, lang2, reverse=False):
# print("Reading lines...")
#
# # Read the file and split into lines
# lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8'). \
# read().strip().split('\n')
#
# # Split every line into pairs and normalize
# pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines]
#
# # Reverse pairs, make Lang instances
# if reverse:
# pairs = [list(reversed(p)) for p in pairs]
# input_lang = Lang(lang2)
# output_lang = Lang(lang1)
# else:
# input_lang = Lang(lang1)
# output_lang = Lang(lang2)
#
# return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
#
# eng_prefixes = (
# "i am ", "i m ",
# "he is", "he s ",
# "she is", "she s",
# "you are", "you re ",
# "we are", "we re ",
# "they are", "they re "
# )
#
#
# def filter_pair(p):
# return len(p[0].split(' ')) < opts.max_seq_length and \
# len(p[1].split(' ')) < opts.max_seq_length and \
# p[1].startswith(eng_prefixes)
#
#
# def filter_pairs(pairs):
# return [pair for pair in pairs if filter_pair(pair)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
#
# def prepare_data(lang1, lang2, reverse=False):
# input_lang, output_lang, pairs = read_langs(lang1, lang2, reverse)
# print("Read %s sentence pairs" % len(pairs))
# pairs = filter_pairs(pairs)
# print("Trimmed to %s sentence pairs" % len(pairs))
# print("Counting words...")
# for pair in pairs:
# input_lang.add_sentence(pair[0])
# output_lang.add_sentence(pair[1])
# print("Counted words:")
# print(input_lang.name, input_lang.n_words)
# print(output_lang.name, output_lang.n_words)
# return input_lang, output_lang, pairs
#
#
# input_lang, output_lang, pairs = prepare_data('eng', 'fra', True)
# print(random.choice(pairs))
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
#
# def indexes_from_sentence(lang, sentence):
# return [lang.word2index[word] for word in sentence.split(' ')]
#
#
# def variable_from_sentence(lang, sentence):
# indexes = indexes_from_sentence(lang, sentence)
# indexes.append(EOS_token)
# result = Variable(torch.LongTensor(indexes).view(-1, 1))
# if use_cuda:
# return result.cuda()
# else:
# return result
#
#
# def variables_from_pair(pair):
# input_variable = variable_from_sentence(input_lang, pair[0])
# target_variable = variable_from_sentence(output_lang, pair[1])
# return input_variable, target_variable
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# decoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://minds.jacobs-university.de/sites/default/files/uploads
# /papers/ESNTutorialRev.pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
def train(input_variable, target_variable, encoder, decoder,
          encoder_optimizer,
          decoder_optimizer, criterion, max_length):
    """Run one optimization step on a single (input, target) batch.

    Encodes the input sequence, decodes ``target_length`` steps (with or
    without teacher forcing), backpropagates, and steps both optimizers.
    Returns the loss averaged over target timesteps.
    """
    encoder_hidden = encoder.init_hidden()

    # Drop gradients accumulated by the previous step.
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    n_in = input_variable.size()[0]
    n_out = target_variable.size()[0]

    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
    if use_cuda:
        encoder_outputs = encoder_outputs.cuda()

    loss = 0

    # Encode the whole input sequence, collecting each step's output.
    for step in range(n_in):
        encoder_output, encoder_hidden = encoder(
            input_variable[step], encoder_hidden)
        encoder_outputs[step] = encoder_output[0][0]

    # First decoder input is an all-zero frame; the decoder starts from
    # the encoder's final hidden state.
    decoder_input = Variable(torch.zeros(opts.batch_size, opts.params_len))
    if use_cuda:
        decoder_input = decoder_input.cuda()
    decoder_hidden = encoder_hidden

    teacher_force = random.random() < opts.teacher_forcing_ratio
    if teacher_force:
        # Teacher forcing: feed the ground-truth frame as the next input.
        for t in range(n_out):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            loss += criterion(decoder_output, target_variable[t])
            decoder_input = target_variable[t]  # Teacher forcing
    else:
        # Free running: feed the decoder's own prediction back in.
        for t in range(n_out):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            decoder_input = Variable(torch.LongTensor([[ni]]))
            if use_cuda:
                decoder_input = decoder_input.cuda()
            loss += criterion(decoder_output[0], target_variable[t])
            # if ni == EOS_token:
            #     break

    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.data[0] / n_out
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def as_minutes(s):
    """Format a duration of *s* seconds as a ``'Xm Ys'`` string."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def time_since(since, percent):
    """Return ``'elapsed (ETA: remaining)'`` for a run started at *since*.

    *percent* is the fraction of the work completed so far (0 < percent <= 1);
    the ETA assumes progress continues at the average rate observed.
    """
    elapsed = time.time() - since
    projected_total = elapsed / percent
    remaining = projected_total - elapsed
    return '%s (ETA: %s)' % (as_minutes(elapsed), as_minutes(remaining))
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of epochs, time so far, estimated time) and average loss.
#
def train_epochs(dataloader, encoder, decoder):
    """Run the full training loop over ``dataloader`` batches.

    Uses Adam optimizers with MSE loss, logs a running average loss per
    batch, and saves the encoder/decoder state dicts at each epoch
    boundary.  Returns the trained ``(encoder, decoder)`` pair.
    """
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    batch_idx = 0
    total_batch_idx = 0
    curr_epoch = 0
    b_epoch = dataloader.train_batches_per_epoch
    # encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    # decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=opts.learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=opts.learning_rate)
    # training_pairs = [variables_from_pair(random.choice(pairs))
    #                   for _ in range(n_epochs)]
    # criterion = nn.NLLLoss()
    criterion = nn.MSELoss()
    # Train on dataset batches
    for src_batch, src_batch_seq_len, trg_batch, trg_mask in \
            dataloader.next_batch():
        if curr_epoch == 0 and batch_idx == 0:
            logger.info(
                'Batches per epoch: {}'.format(b_epoch))
            logger.info(
                'Total batches: {}'.format(b_epoch * opts.epoch))
        # beg_t = timeit.default_timer()
        # for epoch in range(1, n_epochs + 1):
        # training_pair = training_pairs[epoch - 1]
        # input_variable = training_pair[0]
        # target_variable = training_pair[1]
        # Transpose data to be shaped (max_seq_length, num_sequences, params_len)
        input_variable = Variable(
            torch.from_numpy(src_batch[:, :, 0:44]).float()
        ).transpose(1, 0).contiguous()
        target_variable = Variable(
            torch.from_numpy(trg_batch).float()
        ).transpose(1, 0).contiguous()
        input_variable = input_variable.cuda() if use_cuda else input_variable
        target_variable = target_variable.cuda() if use_cuda else target_variable
        # One optimization step on this batch; returns per-frame avg loss.
        loss = train(input_variable, target_variable, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion,
                     opts.max_seq_length)
        print_loss_total += loss
        # plot_loss_total += loss
        # NOTE(review): ``print_loss_total`` is reset each epoch below but the
        # divisor spans all epochs, so this "average" shrinks after the first
        # epoch rollover — confirm intended.
        print_loss_avg = print_loss_total / (total_batch_idx + 1)
        plot_losses.append(print_loss_avg)
        logger.info(
            'Batch {:2.0f}/{:2.0f} - Epoch {:2.0f}/{:2.0f} ({:3.2%}) - Loss={'
            ':.8f} - Time: {'
            '!s}'.format(
                batch_idx,
                b_epoch,
                curr_epoch + 1,
                opts.epoch,
                ((batch_idx % b_epoch) + 1) / b_epoch,
                print_loss_avg,
                time_since(start,
                           (total_batch_idx + 1) / (b_epoch * opts.epoch))))
        # Epoch rollover: bump the epoch counter and checkpoint both modules.
        if batch_idx >= b_epoch:
            curr_epoch += 1
            batch_idx = 0
            print_loss_total = 0
            # Save model
            # Instructions for saving and loading a model:
            # http://pytorch.org/docs/notes/serialization.html
            # with gzip.open(
            enc_file = os.path.join(opts.save_path, 'torch_train',
                                    'encoder_{}.pkl'.format(
                                        curr_epoch))  # , 'wb') as enc:
            torch.save(encoder.state_dict(), enc_file)
            # with gzip.open(
            dec_file = os.path.join(opts.save_path, 'torch_train',
                                    'decoder_{}.pkl'.format(
                                        curr_epoch))  # , 'wb') as dec:
            torch.save(decoder.state_dict(), dec_file)
            # TODO Validation?
        batch_idx += 1
        total_batch_idx += 1
        if curr_epoch >= opts.epoch:
            logger.info('Finished epochs -> BREAK')
            break
    # On a workstation show the loss curve; on a server dump it to CSV.
    if not opts.server:
        show_plot(plot_losses)
    else:
        save_path = os.path.join(opts.save_path, 'torch_train', 'graphs')
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        np.savetxt(os.path.join(save_path, 'train_losses' + '.csv'), plot_losses)
    return encoder, decoder
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
if not opts.server:
import matplotlib
matplotlib.use('TKagg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def show_plot(points, filename='train_loss'):
    """Plot the loss curve in *points* and save it as an EPS figure.

    The figure is written to ``<save_path>/torch_train/graphs/<filename>.eps``;
    the directory is created on demand.
    """
    graphs_dir = os.path.join(opts.save_path, 'torch_train', 'graphs')
    if not os.path.exists(graphs_dir):
        os.makedirs(graphs_dir)
    plt.figure()
    # fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    # loc = ticker.MultipleLocator(base=0.2)
    # ax.yaxis.set_major_locator(loc)
    plt.plot(points)
    # Pass the flag positionally: the keyword was renamed from ``b`` to
    # ``visible`` in matplotlib 3.5+, so ``grid(True)`` works on every version.
    plt.grid(True)
    plt.savefig(os.path.join(graphs_dir, filename + '.eps'),
                bbox_inches='tight')
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def test(encoder, decoder, dl):
    """Run inference over the test set and dump predictions to disk.

    For each batch: encode, free-run the decoder for ``max_seq_length``
    steps, unscale the outputs back to speaker range, write per-utterance
    HDF5/ASCII files and print MCD / U/V accuracy / pitch-RMSE metrics.
    Attention maps from every batch are pickled at the end.

    Fix: predictions are now unscaled by ``(trg_spk_max - trg_spk_min)``;
    previously the factor was ``(trg_spk_min - trg_spk_min)`` (== 0),
    which zeroed all unscaled predictions.
    """
    if opts.load_model:
        # Get filenames of last epoch files
        enc_file = sorted(glob.glob(
            os.path.join(opts.save_path, 'torch_train', 'encoder*.pkl')))[-1]
        dec_file = sorted(glob.glob(
            os.path.join(opts.save_path, 'torch_train', 'decoder*.pkl')))[-1]
        # Open model files and load
        encoder.load_state_dict(torch.load(enc_file))
        decoder.load_state_dict(torch.load(dec_file))
    batch_idx = 0
    n_batch = 0
    attentions = []
    for (src_batch_padded, src_batch_seq_len, trg_batch, trg_mask) in dl.next_batch(
            test=True):
        src_batch = []
        # Take the last `seq_len` timesteps of each sequence to remove padding
        # NOTE(review): ``src_batch`` is a Python list, but it is indexed
        # below like a numpy array (``src_batch[:, :, 0:44]``) — this looks
        # broken; confirm whether the slices should be stacked into an array.
        for i in range(src_batch_padded.shape[0]):
            src_batch.append(src_batch_padded[i,-src_batch_seq_len[i]:,:])
        # TODO Get filename from datatable
        f_name = format(n_batch + 1,
                        '0' + str(max(5, len(str(dl.src_test_data.shape[0])))))
        input_variable = Variable(
            torch.from_numpy(src_batch[:, :, 0:44]).float()
        ).transpose(1, 0).contiguous()
        input_variable = input_variable.cuda() if use_cuda else input_variable
        input_length = input_variable.size()[0]
        encoder_hidden = encoder.init_hidden()
        encoder_outputs = Variable(
            torch.zeros(opts.max_seq_length, encoder.hidden_size))
        encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
        # Encode the whole input sequence, collecting per-step outputs.
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_variable[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
        # First decoder input is an all-zero frame (no targets at test time).
        decoder_input = Variable(torch.zeros(opts.batch_size, opts.params_len))
        decoder_input = decoder_input.cuda() if use_cuda else decoder_input
        decoder_hidden = encoder_hidden
        decoded_frames = []
        decoder_attentions = torch.zeros(opts.max_seq_length, opts.batch_size,
                                         opts.max_seq_length)
        # Free-run the decoder, feeding each prediction back as input.
        for di in range(opts.max_seq_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            decoded_frames.append(decoder_output.data.cpu().numpy())
            decoder_input = decoder_output
            decoder_input = decoder_input.cuda() if use_cuda else decoder_input
        # Decode output frames
        predictions = np.array(decoded_frames).transpose((1, 0, 2))
        attentions.append(decoder_attentions[:di + 1].numpy().transpose((1, 0, 2)))
        # TODO Decode speech data and display attentions
        # Save original U/V flags to save them to file
        raw_uv_flags = predictions[:, :, 42]
        # Unscale target and predicted parameters
        for i in range(predictions.shape[0]):
            src_spk_index = int(src_batch[i, 0, 44])
            trg_spk_index = int(src_batch[i, 0, 45])
            # Prepare filename
            # Get speakers names
            src_spk_name = dl.s2s_datatable.src_speakers[src_spk_index]
            trg_spk_name = dl.s2s_datatable.trg_speakers[trg_spk_index]
            # Make sure the save directory exists
            tf_pred_path = os.path.join(opts.test_data_path, opts.pred_path)
            if not os.path.exists(
                    os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name)):
                os.makedirs(
                    os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name))
            with h5py.File(
                    os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
                                 f_name + '_' + str(i) + '.h5'), 'w') as file:
                file.create_dataset('predictions', data=predictions[i],
                                    compression="gzip",
                                    compression_opts=9)
                file.create_dataset('target', data=trg_batch[i],
                                    compression="gzip",
                                    compression_opts=9)
                file.create_dataset('mask', data=trg_mask[i],
                                    compression="gzip",
                                    compression_opts=9)
            trg_spk_max = dl.train_trg_speakers_max[trg_spk_index, :]
            trg_spk_min = dl.train_trg_speakers_min[trg_spk_index, :]
            # Undo the min-max normalization for both target and prediction.
            trg_batch[i, :, 0:42] = (trg_batch[i, :, 0:42] * (
                trg_spk_max - trg_spk_min)) + trg_spk_min
            # Fixed: was ``trg_spk_min - trg_spk_min`` (a zero factor).
            predictions[i, :, 0:42] = (predictions[i, :, 0:42] * (
                trg_spk_max - trg_spk_min)) + trg_spk_min
            # Round U/V flags
            predictions[i, :, 42] = np.round(predictions[i, :, 42])
            # Remove padding in prediction and target parameters
            masked_trg = mask_data(trg_batch[i], trg_mask[i])
            trg_batch[i] = np.ma.filled(masked_trg, fill_value=0.0)
            unmasked_trg = np.ma.compress_rows(masked_trg)
            masked_pred = mask_data(predictions[i], trg_mask[i])
            predictions[i] = np.ma.filled(masked_pred, fill_value=0.0)
            unmasked_prd = np.ma.compress_rows(masked_pred)
            # Apply ground truth flags to prediction
            unmasked_prd[:, 40][unmasked_trg[:, 42] == 0] = -1e10
            unmasked_prd[:, 41][unmasked_trg[:, 42] == 0] = 1000
            # FIXME(review): ``file`` was closed when the ``with`` block above
            # exited; these writes target a closed h5py File — confirm whether
            # they belong inside the ``with`` block.
            file.create_dataset('unmasked_prd', data=unmasked_prd,
                                compression="gzip",
                                compression_opts=9)
            file.create_dataset('unmasked_trg', data=unmasked_trg,
                                compression="gzip",
                                compression_opts=9)
            file.create_dataset('trg_max', data=trg_spk_max,
                                compression="gzip",
                                compression_opts=9)
            file.create_dataset('trg_min', data=trg_spk_min,
                                compression="gzip",
                                compression_opts=9)
            file.close()
            # Save predictions to files
            np.savetxt(
                os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
                             f_name + '_' + str(i) + '.vf.dat'),
                unmasked_prd[:, 41]
            )
            np.savetxt(
                os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
                             f_name + '_' + str(i) + '.lf0.dat'),
                unmasked_prd[:, 40]
            )
            np.savetxt(
                os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
                             f_name + '_' + str(i) + '.mcp.dat'),
                unmasked_prd[:, 0:40],
                delimiter='\t'
            )
            np.savetxt(
                os.path.join(tf_pred_path, src_spk_name + '-' + trg_spk_name,
                             f_name + '_' + str(i) + '.uv.dat'),
                raw_uv_flags[i, :]
            )
            # Display metrics
            print('Num - {}'.format(n_batch))
            print('MCD = {} dB'.format(
                error_metrics.MCD(unmasked_trg[:, 0:40].reshape(-1, 40),
                                  unmasked_prd[:, 0:40].reshape(-1, 40))))
            acc, _, _, _ = error_metrics.AFPR(unmasked_trg[:, 42].reshape(-1, 1),
                                              unmasked_prd[:, 42].reshape(-1, 1))
            print('U/V accuracy = {}'.format(acc))
            pitch_rmse = error_metrics.RMSE(
                np.exp(unmasked_trg[:, 40].reshape(-1, 1)),
                np.exp(unmasked_prd[:, 40].reshape(-1, 1)))
            print('Pitch RMSE = {}'.format(pitch_rmse))
        # Increase batch index
        if batch_idx >= dl.test_batches_per_epoch:
            break
        batch_idx += 1
        n_batch += 1
    # Dump attentions to pickle file
    logger.info('Saving attentions to pickle file')
    with gzip.open(
            os.path.join(opts.save_path, 'torch_train', 'attentions.pkl.gz'),
            'wb') as att_file:
        pickle.dump(attentions, att_file)
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
# def evaluate_randomly(encoder, decoder, n=10):
# for i in range(n):
# pair = random.choice(pairs)
# print('>', pair[0])
# print('=', pair[1])
# output_words, attentions = evaluate(encoder, decoder, pair[0])
# output_sentence = ' '.join(output_words)
# print('<', output_sentence)
# print('')
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it's easier to run multiple experiments easier) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainEpochs`` again.
#
######################################################################
#
# evaluate_randomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
# ---------------------
#
# A useful property of the attention mechanism is its highly interpretable
# outputs. Because it is used to weight specific encoder outputs of the
# input sequence, we can imagine looking where the network is focused most
# at each time step.
#
# You could simply run ``plt.matshow(attentions)`` to see attention output
# displayed as a matrix, with the columns being input steps and rows being
# output steps:
#
#
# output_words, attentions = evaluate(
# encoder1, attn_decoder1, "je suis trop froid .")
# plt.matshow(attentions.numpy())
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def show_attention():
    """Load the pickled attention maps and display them as a matrix plot."""
    # Load attentions
    logger.info('Loading attentions to pickle file')
    att_path = os.path.join(opts.save_path, 'torch_train', 'attentions.pkl.gz')
    with gzip.open(att_path, 'r') as att_file:
        attentions = pickle.load(att_file)
    # Matrix plot with a colorbar.
    # NOTE(review): ``attentions`` is pickled as a *list* of numpy arrays in
    # ``test``; calling ``.numpy()`` on it looks suspect — confirm.
    fig = plt.figure()
    axes = fig.add_subplot(111)
    heatmap = axes.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(heatmap)
    plt.show()
# def evaluate_and_show_attention(input_sentence):
# output_words, attentions = evaluate(
# encoder1, attn_decoder1, input_sentence)
# print('input =', input_sentence)
# print('output =', ' '.join(output_words))
# show_attention(input_sentence, output_words, attentions)
#
#
# evaluate_and_show_attention("elle a cinq ans de moins que moi .")
#
# evaluate_and_show_attention("elle est trop petit .")
#
# evaluate_and_show_attention("je ne crains pas de mourir .")
#
# evaluate_and_show_attention("c est un jeune directeur plein de talent .")
######################################################################
# Exercises
# =========
#
# - Try with a different dataset
#
# - Another language pair
# - Human → Machine (e.g. IOT commands)
# - Chat → Response
# - Question → Answer
#
# - Replace the embedding pre-trained word embeddings such as word2vec or
# GloVe
# - Try with more layers, more hidden units, and more sentences. Compare
# the training time and results.
# - If you use a translation file where pairs have two of the same phrase
# (``I am test \t I am test``), you can use this as an autoencoder. Try
# this:
#
# - Train as an autoencoder
# - Save only the Encoder network
# - Train a new Decoder for translation from there
#
# Script entry point: ``opts`` was parsed at module import time above.
if __name__ == '__main__':
    logger.debug('Before calling main')
    main(opts)
| gpl-3.0 |
vybstat/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np

from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve

# Fit an SVC over a logarithmic grid of gamma values, collecting the
# 10-fold cross-validated train/test accuracy for each value.
digits = load_digits()
X, y = digits.data, digits.target

param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)

# Mean and spread of the scores across CV folds, per gamma value.
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

plt.title("Validation Curve with SVM")
# Raw string: "\g" is an invalid escape sequence in a plain string literal
# (SyntaxWarning on modern Python); the rendered TeX label is unchanged.
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_inst_Rot/Geneva_inst_Rot_6/fullgrid/IR.py | 30 | 9364 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
# Locate the three Cloudy grid files (*.grd) and the three emission-line
# files (*.txt) in the working directory.  If several files share a suffix,
# the last one listed by os.listdir wins.
# NOTE(review): the loop variable ``file`` shadows a builtin name.
for file in os.listdir('.'):
    if file.endswith("1.grd"):
        gridfile1 = file
for file in os.listdir('.'):
    if file.endswith("2.grd"):
        gridfile2 = file
for file in os.listdir('.'):
    if file.endswith("3.grd"):
        gridfile3 = file
# ------------------------
for file in os.listdir('.'):
    if file.endswith("1.txt"):
        Elines1 = file
for file in os.listdir('.'):
    if file.endswith("2.txt"):
        Elines2 = file
for file in os.listdir('.'):
    if file.endswith("3.txt"):
        Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
# Polygon vertices (x, y) outlining each literature data region; the final
# (0, 0) vertex is ignored because of the CLOSEPOLY code.
verts = [
    (1., 7.97712125471966000000), # left, bottom
    (1., 9.57712125471966000000), # left, top
    (2., 10.57712125471970000000), # right, top
    (2., 8.97712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
# Path codes: move to the first vertex, draw three edges, then close.
codes = [Path.MOVETO,
         Path.LINETO,
         Path.LINETO,
         Path.LINETO,
         Path.CLOSEPOLY,
         ]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
    (2.4, 9.243038049), # left, bottom
    (2.4, 11.0211893), # left, top
    (2.6, 11.0211893), # right, top
    (2.6, 9.243038049), # right, bottom
    (0, 0.), # ignored
    ]
# NOTE(review): ``path`` is rebuilt here (and again below) from the same
# ``verts``/``codes`` — the repeats are redundant but harmless.
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
    (1., 6.86712125471966000000), # left, bottom
    (1., 10.18712125471970000000), # left, top
    (3., 12.18712125471970000000), # right, top
    (3., 8.86712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    """Overlay the literature data regions on *ax* as colored patches.

    Draws the Moy et al. (yellow), Kewley 01 (green) and Kewley/Levesque
    (red) polygons defined at module level.

    Fix: the function previously ignored its ``ax`` argument and drew on
    the global ``ax1``; it now draws on the axes it is given (call sites
    that pass ``ax1`` behave exactly as before).
    """
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
    patch = patches.PathPatch(path, facecolor='red', lw=0)
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine; will be called later
# Total number of panels in the 3x4 subplot grid.
numplots = 12
def add_sub_plot(sub_num):
    # Draw panel ``sub_num`` (1-based) of the 3x4 grid: an RBF-interpolated
    # contour map of column ``sub_num - 1`` of the module-level ``z`` data,
    # with the peak value of that emission line marked by a star.
    plt.subplot(3,4,sub_num)
    # Interpolate the scattered (x, y, z) grid points onto the (xi, yi) mesh.
    rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
    zi = rbf(xi, yi)
    contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
    contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
    plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
    plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
    plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
    if sub_num == numplots / 2.:
        print "half the plots are complete"
    #axis limits
    yt_min = 8
    yt_max = 23
    xt_min = 0
    xt_max = 12
    plt.ylim(yt_min,yt_max)
    plt.xlim(xt_min,xt_max)
    plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
    plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    # Only the left column keeps y tick labels; only the bottom row keeps
    # x tick labels, so interior panels stay uncluttered.
    if sub_num in [2,3,4,6,7,8,10,11,12]:
        plt.tick_params(labelleft = 'off')
    else:
        plt.tick_params(labelleft = 'on')
        plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
    if sub_num in [1,2,3,4,5,6,7,8]:
        plt.tick_params(labelbottom = 'off')
    else:
        plt.tick_params(labelbottom = 'on')
        plt.xlabel('Log($n _{\mathrm{H}} $)')
    # Corner panels get slightly different tick ranges so the shared axis
    # labels do not overlap between adjacent panels.
    if sub_num == 1:
        plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
    if sub_num == 9:
        plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 12:
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
# Each *.grd file is read as a tab-separated table; rows are accumulated
# as lists of strings and then converted to numpy arrays with asarray.
# NOTE(review): opening in 'rb' for csv.reader is a Python 2 idiom,
# consistent with the print statements elsewhere in this script.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
# Same tab-separated format as the grid files, but the first row holds the
# emission-line names; it is captured separately as headers/headers2/headers3.
# NOTE(review): only `headers` (from the first file) is used later; the
# other two are read but apparently unused -- confirm before removing.
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers2 = csvReader.next()
    for row in csvReader:
        dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers3 = csvReader.next()
    for row in csvReader:
        dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
# Scale every emission line by the reference column (index 57, the 4860
# line) and store the base-10 log of the ratio; entries whose log-ratio is
# not positive are left at zero.
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        # hoist the repeated ratio/log computation into one name
        log_ratio = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        if log_ratio > 0:
            concatenated_data[i,j] = log_ratio
        else:
            # Bug fix: the original wrote `concatenated_data[i,j] == 0`,
            # a no-op comparison.  It had no visible effect only because
            # the array is pre-initialized with zeros; assign explicitly.
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [75, #AR 3 7135
76, #TOTL 7325
78, #AR 3 7751
79, #6LEV 8446
80, #CA2X 8498
81, #CA2Y 8542
82, #CA2Z 8662
83, #CA 2 8579A
84, #S 3 9069
85, #H 1 9229
86, #S 3 9532
87] #H 1 9546
#create z array for this plot with given lines
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty IR Lines", fontsize=14)
# ---------------------------------------------------
for i in range(12):
add_sub_plot(i)
ax1 = plt.subplot(3,4,1)
add_patches(ax1)
print "figure complete"
plt.savefig('Dusty_Near_IR.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
rabernat/xray | xarray/tests/test_computation.py | 1 | 28519 | import functools
import operator
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
import xarray as xr
from xarray.core.computation import (
_UFuncSignature, result_name, broadcast_compat_data, collect_dict_values,
join_dict_keys, ordered_set_intersection, ordered_set_union,
unified_dim_sizes, apply_ufunc)
from . import requires_dask, raises_regex
def assert_identical(a, b):
    """Assert that *a* and *b* are identical.

    Prefers the xarray ``identical`` method when *a* provides one;
    otherwise falls back to element-wise numpy comparison.
    """
    identical = getattr(a, 'identical', None)
    if identical is None:
        assert_array_equal(a, b)
    else:
        assert identical(b), 'not identical:\n%r\n%r' % (a, b)
def test_signature_properties():
    """_UFuncSignature exposes core dims, input/output counts, and both the
    named and generic (gufunc-style) string representations."""
    sig = _UFuncSignature([['x'], ['x', 'y']], [['z']])
    assert sig.input_core_dims == (('x',), ('x', 'y'))
    assert sig.output_core_dims == (('z',),)
    assert sig.all_input_core_dims == frozenset(['x', 'y'])
    assert sig.all_output_core_dims == frozenset(['z'])
    assert sig.num_inputs == 2
    assert sig.num_outputs == 1
    assert str(sig) == '(x),(x,y)->(z)'
    # generic form renames dims positionally (dim0, dim1, ...)
    assert sig.to_gufunc_string() == '(dim0),(dim0,dim1)->(dim2)'
    # dimension names matter for equality, not just arity
    assert _UFuncSignature([['x']]) != _UFuncSignature([['y']])
def test_result_name():
class Named(object):
def __init__(self, name=None):
self.name = name
assert result_name([1, 2]) is None
assert result_name([Named()]) is None
assert result_name([Named('foo'), 2]) == 'foo'
assert result_name([Named('foo'), Named('bar')]) is None
assert result_name([Named('foo'), Named()]) is None
def test_ordered_set_union():
assert list(ordered_set_union([[1, 2]])) == [1, 2]
assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3]
def test_ordered_set_intersection():
assert list(ordered_set_intersection([[1, 2]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2]
assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1]
assert list(ordered_set_intersection([[1, 2], [2]])) == [2]
def test_join_dict_keys():
dicts = [OrderedDict.fromkeys(keys) for keys in [['x', 'y'], ['y', 'z']]]
assert list(join_dict_keys(dicts, 'left')) == ['x', 'y']
assert list(join_dict_keys(dicts, 'right')) == ['y', 'z']
assert list(join_dict_keys(dicts, 'inner')) == ['y']
assert list(join_dict_keys(dicts, 'outer')) == ['x', 'y', 'z']
with pytest.raises(ValueError):
join_dict_keys(dicts, 'exact')
with pytest.raises(KeyError):
join_dict_keys(dicts, 'foobar')
def test_collect_dict_values():
dicts = [{'x': 1, 'y': 2, 'z': 3}, {'z': 4}, 5]
expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]]
collected = collect_dict_values(dicts, ['x', 'y', 'z'], fill_value=0)
assert collected == expected
def identity(x):
    """Return the argument unchanged (trivial function used as a test ufunc)."""
    return x
def test_apply_identity():
array = np.arange(10)
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
apply_identity = functools.partial(apply_ufunc, identity)
assert_identical(array, apply_identity(array))
assert_identical(variable, apply_identity(variable))
assert_identical(data_array, apply_identity(data_array))
assert_identical(data_array, apply_identity(data_array.groupby('x')))
assert_identical(dataset, apply_identity(dataset))
assert_identical(dataset, apply_identity(dataset.groupby('x')))
def add(a, b):
return apply_ufunc(operator.add, a, b)
def test_apply_two_inputs():
array = np.array([1, 2, 3])
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
zero_array = np.zeros_like(array)
zero_variable = xr.Variable('x', zero_array)
zero_data_array = xr.DataArray(zero_variable, [('x', -array)])
zero_dataset = xr.Dataset({'y': zero_variable}, {'x': -array})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby('x'), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby('x')))
assert_identical(dataset, add(data_array.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby('x')))
assert_identical(dataset, add(dataset.groupby('x'), zero_data_array))
assert_identical(dataset, add(dataset.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby('x')))
assert_identical(dataset, add(zero_dataset, dataset.groupby('x')))
def test_apply_1d_and_0d():
array = np.array([1, 2, 3])
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
zero_array = 0
zero_variable = xr.Variable((), zero_array)
zero_data_array = xr.DataArray(zero_variable)
zero_dataset = xr.Dataset({'y': zero_variable})
assert_identical(array, add(array, zero_array))
assert_identical(array, add(zero_array, array))
assert_identical(variable, add(variable, zero_array))
assert_identical(variable, add(variable, zero_variable))
assert_identical(variable, add(zero_array, variable))
assert_identical(variable, add(zero_variable, variable))
assert_identical(data_array, add(data_array, zero_array))
assert_identical(data_array, add(data_array, zero_variable))
assert_identical(data_array, add(data_array, zero_data_array))
assert_identical(data_array, add(zero_array, data_array))
assert_identical(data_array, add(zero_variable, data_array))
assert_identical(data_array, add(zero_data_array, data_array))
assert_identical(dataset, add(dataset, zero_array))
assert_identical(dataset, add(dataset, zero_variable))
assert_identical(dataset, add(dataset, zero_data_array))
assert_identical(dataset, add(dataset, zero_dataset))
assert_identical(dataset, add(zero_array, dataset))
assert_identical(dataset, add(zero_variable, dataset))
assert_identical(dataset, add(zero_data_array, dataset))
assert_identical(dataset, add(zero_dataset, dataset))
assert_identical(data_array, add(data_array.groupby('x'), zero_data_array))
assert_identical(data_array, add(zero_data_array, data_array.groupby('x')))
assert_identical(dataset, add(data_array.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_dataset, data_array.groupby('x')))
assert_identical(dataset, add(dataset.groupby('x'), zero_data_array))
assert_identical(dataset, add(dataset.groupby('x'), zero_dataset))
assert_identical(dataset, add(zero_data_array, dataset.groupby('x')))
assert_identical(dataset, add(zero_dataset, dataset.groupby('x')))
def test_apply_two_outputs():
array = np.arange(5)
variable = xr.Variable('x', array)
data_array = xr.DataArray(variable, [('x', -array)])
dataset = xr.Dataset({'y': variable}, {'x': -array})
def twice(obj):
def func(x):
return (x, x)
return apply_ufunc(func, obj, output_core_dims=[[], []])
out0, out1 = twice(array)
assert_identical(out0, array)
assert_identical(out1, array)
out0, out1 = twice(variable)
assert_identical(out0, variable)
assert_identical(out1, variable)
out0, out1 = twice(data_array)
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset)
assert_identical(out0, dataset)
assert_identical(out1, dataset)
out0, out1 = twice(data_array.groupby('x'))
assert_identical(out0, data_array)
assert_identical(out1, data_array)
out0, out1 = twice(dataset.groupby('x'))
assert_identical(out0, dataset)
assert_identical(out1, dataset)
def test_apply_input_core_dimension():
def first_element(obj, dim):
def func(x):
return x[..., 0]
return apply_ufunc(func, obj, input_core_dims=[[dim]])
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(['x', 'y'], array)
data_array = xr.DataArray(variable, {'x': ['a', 'b'], 'y': [-1, -2]})
dataset = xr.Dataset({'data': data_array})
expected_variable_x = xr.Variable(['y'], [1, 2])
expected_data_array_x = xr.DataArray(expected_variable_x, {'y': [-1, -2]})
expected_dataset_x = xr.Dataset({'data': expected_data_array_x})
expected_variable_y = xr.Variable(['x'], [1, 3])
expected_data_array_y = xr.DataArray(expected_variable_y,
{'x': ['a', 'b']})
expected_dataset_y = xr.Dataset({'data': expected_data_array_y})
assert_identical(expected_variable_x, first_element(variable, 'x'))
assert_identical(expected_variable_y, first_element(variable, 'y'))
assert_identical(expected_data_array_x, first_element(data_array, 'x'))
assert_identical(expected_data_array_y, first_element(data_array, 'y'))
assert_identical(expected_dataset_x, first_element(dataset, 'x'))
assert_identical(expected_dataset_y, first_element(dataset, 'y'))
assert_identical(expected_data_array_x,
first_element(data_array.groupby('y'), 'x'))
assert_identical(expected_dataset_x,
first_element(dataset.groupby('y'), 'x'))
def test_apply_output_core_dimension():
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
result = apply_ufunc(func, obj, output_core_dims=[['sign']])
if isinstance(result, (xr.Dataset, xr.DataArray)):
result.coords['sign'] = [1, -1]
return result
array = np.array([[1, 2], [3, 4]])
variable = xr.Variable(['x', 'y'], array)
data_array = xr.DataArray(variable, {'x': ['a', 'b'], 'y': [-1, -2]})
dataset = xr.Dataset({'data': data_array})
stacked_array = np.array([[[1, -1], [2, -2]], [[3, -3], [4, -4]]])
stacked_variable = xr.Variable(['x', 'y', 'sign'], stacked_array)
stacked_coords = {'x': ['a', 'b'], 'y': [-1, -2], 'sign': [1, -1]}
stacked_data_array = xr.DataArray(stacked_variable, stacked_coords)
stacked_dataset = xr.Dataset({'data': stacked_data_array})
assert_identical(stacked_array, stack_negative(array))
assert_identical(stacked_variable, stack_negative(variable))
assert_identical(stacked_data_array, stack_negative(data_array))
assert_identical(stacked_dataset, stack_negative(dataset))
assert_identical(stacked_data_array,
stack_negative(data_array.groupby('x')))
assert_identical(stacked_dataset,
stack_negative(dataset.groupby('x')))
def original_and_stack_negative(obj):
def func(x):
return (x, np.stack([x, -x], axis=-1))
result = apply_ufunc(func, obj, output_core_dims=[[], ['sign']])
if isinstance(result[1], (xr.Dataset, xr.DataArray)):
result[1].coords['sign'] = [1, -1]
return result
out0, out1 = original_and_stack_negative(array)
assert_identical(array, out0)
assert_identical(stacked_array, out1)
out0, out1 = original_and_stack_negative(variable)
assert_identical(variable, out0)
assert_identical(stacked_variable, out1)
out0, out1 = original_and_stack_negative(data_array)
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset)
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
out0, out1 = original_and_stack_negative(data_array.groupby('x'))
assert_identical(data_array, out0)
assert_identical(stacked_data_array, out1)
out0, out1 = original_and_stack_negative(dataset.groupby('x'))
assert_identical(dataset, out0)
assert_identical(stacked_dataset, out1)
def test_apply_exclude():
def concatenate(objects, dim='x'):
def func(*x):
return np.concatenate(x, axis=-1)
result = apply_ufunc(func, *objects,
input_core_dims=[[dim]] * len(objects),
output_core_dims=[[dim]],
exclude_dims={dim})
if isinstance(result, (xr.Dataset, xr.DataArray)):
# note: this will fail if dim is not a coordinate on any input
new_coord = np.concatenate([obj.coords[dim] for obj in objects])
result.coords[dim] = new_coord
return result
arrays = [np.array([1]), np.array([2, 3])]
variables = [xr.Variable('x', a) for a in arrays]
data_arrays = [xr.DataArray(v, {'x': c, 'y': ('x', range(len(c)))})
for v, c in zip(variables, [['a'], ['b', 'c']])]
datasets = [xr.Dataset({'data': data_array}) for data_array in data_arrays]
expected_array = np.array([1, 2, 3])
expected_variable = xr.Variable('x', expected_array)
expected_data_array = xr.DataArray(expected_variable, [('x', list('abc'))])
expected_dataset = xr.Dataset({'data': expected_data_array})
assert_identical(expected_array, concatenate(arrays))
assert_identical(expected_variable, concatenate(variables))
assert_identical(expected_data_array, concatenate(data_arrays))
assert_identical(expected_dataset, concatenate(datasets))
# must also be a core dimension
with pytest.raises(ValueError):
apply_ufunc(identity, variables[0], exclude_dims={'x'})
def test_apply_groupby_add():
array = np.arange(5)
variable = xr.Variable('x', array)
coords = {'x': -array, 'y': ('x', [0, 0, 1, 1, 2])}
data_array = xr.DataArray(variable, coords, dims='x')
dataset = xr.Dataset({'z': variable}, coords)
other_variable = xr.Variable('y', [0, 10])
other_data_array = xr.DataArray(other_variable, dims='y')
other_dataset = xr.Dataset({'z': other_variable})
expected_variable = xr.Variable('x', [0, 1, 12, 13, np.nan])
expected_data_array = xr.DataArray(expected_variable, coords, dims='x')
expected_dataset = xr.Dataset({'z': expected_variable}, coords)
assert_identical(expected_data_array,
add(data_array.groupby('y'), other_data_array))
assert_identical(expected_dataset,
add(data_array.groupby('y'), other_dataset))
assert_identical(expected_dataset,
add(dataset.groupby('y'), other_data_array))
assert_identical(expected_dataset,
add(dataset.groupby('y'), other_dataset))
# cannot be performed with xarray.Variable objects that share a dimension
with pytest.raises(ValueError):
add(data_array.groupby('y'), other_variable)
# if they are all grouped the same way
with pytest.raises(ValueError):
add(data_array.groupby('y'), data_array[:4].groupby('y'))
with pytest.raises(ValueError):
add(data_array.groupby('y'), data_array[1:].groupby('y'))
with pytest.raises(ValueError):
add(data_array.groupby('y'), other_data_array.groupby('y'))
with pytest.raises(ValueError):
add(data_array.groupby('y'), data_array.groupby('x'))
def test_unified_dim_sizes():
assert unified_dim_sizes([xr.Variable((), 0)]) == OrderedDict()
assert (unified_dim_sizes([xr.Variable('x', [1]),
xr.Variable('x', [1])]) ==
OrderedDict([('x', 1)]))
assert (unified_dim_sizes([xr.Variable('x', [1]),
xr.Variable('y', [1, 2])]) ==
OrderedDict([('x', 1), ('y', 2)]))
assert (unified_dim_sizes([xr.Variable(('x', 'z'), [[1]]),
xr.Variable(('y', 'z'), [[1, 2], [3, 4]])],
exclude_dims={'z'}) ==
OrderedDict([('x', 1), ('y', 2)]))
# duplicate dimensions
with pytest.raises(ValueError):
unified_dim_sizes([xr.Variable(('x', 'x'), [[1]])])
# mismatched lengths
with pytest.raises(ValueError):
unified_dim_sizes(
[xr.Variable('x', [1]), xr.Variable('x', [1, 2])])
def test_broadcast_compat_data_1d():
data = np.arange(5)
var = xr.Variable('x', data)
assert_identical(data, broadcast_compat_data(var, ('x',), ()))
assert_identical(data, broadcast_compat_data(var, (), ('x',)))
assert_identical(data[:], broadcast_compat_data(var, ('w',), ('x',)))
assert_identical(data[:, None],
broadcast_compat_data(var, ('w', 'x', 'y'), ()))
with pytest.raises(ValueError):
broadcast_compat_data(var, ('x',), ('w',))
with pytest.raises(ValueError):
broadcast_compat_data(var, (), ())
def test_broadcast_compat_data_2d():
data = np.arange(12).reshape(3, 4)
var = xr.Variable(['x', 'y'], data)
assert_identical(data, broadcast_compat_data(var, ('x', 'y'), ()))
assert_identical(data, broadcast_compat_data(var, ('x',), ('y',)))
assert_identical(data, broadcast_compat_data(var, (), ('x', 'y')))
assert_identical(data.T, broadcast_compat_data(var, ('y', 'x'), ()))
assert_identical(data.T, broadcast_compat_data(var, ('y',), ('x',)))
assert_identical(data, broadcast_compat_data(var, ('w', 'x'), ('y',)))
assert_identical(data, broadcast_compat_data(var, ('w',), ('x', 'y')))
assert_identical(data.T, broadcast_compat_data(var, ('w',), ('y', 'x')))
assert_identical(data[:, :, None],
broadcast_compat_data(var, ('w', 'x', 'y', 'z'), ()))
assert_identical(data[None, :, :].T,
broadcast_compat_data(var, ('w', 'y', 'x', 'z'), ()))
def test_keep_attrs():
    """apply_ufunc drops attrs by default and preserves them with
    keep_attrs=True, for both DataArray and Dataset inputs."""
    def add(a, b, keep_attrs):
        if keep_attrs:
            return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs)
        else:
            return apply_ufunc(operator.add, a, b)
    a = xr.DataArray([0, 1], [('x', [0, 1])])
    a.attrs['attr'] = 'da'
    b = xr.DataArray([1, 2], [('x', [0, 1])])
    actual = add(a, b, keep_attrs=False)
    assert not actual.attrs
    actual = add(a, b, keep_attrs=True)
    assert_identical(actual.attrs, a.attrs)
    # Fix: the original literals were {'x': ('x', [1, 2]), 'x': [0, 1]} --
    # a dict with a duplicate 'x' key, where Python silently keeps only the
    # last entry.  The dead first entry is removed; the resulting Dataset
    # is byte-for-byte what the original code actually built.
    a = xr.Dataset({'x': [0, 1]})
    a.attrs['attr'] = 'ds'
    a.x.attrs['attr'] = 'da'
    b = xr.Dataset({'x': [0, 1]})
    actual = add(a, b, keep_attrs=False)
    assert not actual.attrs
    actual = add(a, b, keep_attrs=True)
    assert_identical(actual.attrs, a.attrs)
    assert_identical(actual.x.attrs, a.x.attrs)
def test_dataset_join():
ds0 = xr.Dataset({'a': ('x', [1, 2]), 'x': [0, 1]})
ds1 = xr.Dataset({'a': ('x', [99, 3]), 'x': [1, 2]})
# by default, cannot have different labels
with raises_regex(ValueError, 'indexes .* are not equal'):
apply_ufunc(operator.add, ds0, ds1)
with raises_regex(TypeError, 'must supply'):
apply_ufunc(operator.add, ds0, ds1, dataset_join='outer')
def add(a, b, join, dataset_join):
return apply_ufunc(operator.add, a, b, join=join,
dataset_join=dataset_join,
dataset_fill_value=np.nan)
actual = add(ds0, ds1, 'outer', 'inner')
expected = xr.Dataset({'a': ('x', [np.nan, 101, np.nan]),
'x': [0, 1, 2]})
assert_identical(actual, expected)
actual = add(ds0, ds1, 'outer', 'outer')
assert_identical(actual, expected)
with raises_regex(ValueError, 'data variable names'):
apply_ufunc(operator.add, ds0, xr.Dataset({'b': 1}))
ds2 = xr.Dataset({'b': ('x', [99, 3]), 'x': [1, 2]})
actual = add(ds0, ds2, 'outer', 'inner')
expected = xr.Dataset({'x': [0, 1, 2]})
assert_identical(actual, expected)
# we used np.nan as the fill_value in add() above
actual = add(ds0, ds2, 'outer', 'outer')
expected = xr.Dataset({'a': ('x', [np.nan, np.nan, np.nan]),
'b': ('x', [np.nan, np.nan, np.nan]),
'x': [0, 1, 2]})
assert_identical(actual, expected)
@requires_dask
def test_apply_dask():
import dask.array as da
array = da.ones((2,), chunks=2)
variable = xr.Variable('x', array)
coords = xr.DataArray(variable).coords.variables
data_array = xr.DataArray(variable, coords, fastpath=True)
dataset = xr.Dataset({'y': variable})
# encountered dask array, but did not set dask='allowed'
with pytest.raises(ValueError):
apply_ufunc(identity, array)
with pytest.raises(ValueError):
apply_ufunc(identity, variable)
with pytest.raises(ValueError):
apply_ufunc(identity, data_array)
with pytest.raises(ValueError):
apply_ufunc(identity, dataset)
# unknown setting for dask array handling
with pytest.raises(ValueError):
apply_ufunc(identity, array, dask='unknown')
def dask_safe_identity(x):
return apply_ufunc(identity, x, dask='allowed')
assert array is dask_safe_identity(array)
actual = dask_safe_identity(variable)
assert isinstance(actual.data, da.Array)
assert_identical(variable, actual)
actual = dask_safe_identity(data_array)
assert isinstance(actual.data, da.Array)
assert_identical(data_array, actual)
actual = dask_safe_identity(dataset)
assert isinstance(actual['y'].data, da.Array)
assert_identical(dataset, actual)
@requires_dask
def test_apply_dask_parallelized_one_arg():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=('x', 'y'))
def parallel_identity(x):
return apply_ufunc(identity, x, dask='parallelized',
output_dtypes=[x.dtype])
actual = parallel_identity(data_array)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
computed = data_array.compute()
actual = parallel_identity(computed)
assert_identical(computed, actual)
@requires_dask
def test_apply_dask_parallelized_two_args():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64)
data_array = xr.DataArray(array, dims=('x', 'y'))
data_array.name = None
def parallel_add(x, y):
return apply_ufunc(operator.add, x, y,
dask='parallelized',
output_dtypes=[np.int64])
def check(x, y):
actual = parallel_add(x, y)
assert isinstance(actual.data, da.Array)
assert actual.data.chunks == array.chunks
assert_identical(data_array, actual)
check(data_array, 0),
check(0, data_array)
check(data_array, xr.DataArray(0))
check(data_array, 0 * data_array)
check(data_array, 0 * data_array[0])
check(data_array[:, 0], 0 * data_array[0])
check(data_array, 0 * data_array.compute())
@requires_dask
def test_apply_dask_parallelized_errors():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=('x', 'y'))
with pytest.raises(NotImplementedError):
apply_ufunc(identity, data_array, output_core_dims=[['z'], ['z']],
dask='parallelized')
with raises_regex(ValueError, 'dtypes'):
apply_ufunc(identity, data_array, dask='parallelized')
with raises_regex(TypeError, 'list'):
apply_ufunc(identity, data_array, dask='parallelized',
output_dtypes=float)
with raises_regex(ValueError, 'must have the same length'):
apply_ufunc(identity, data_array, dask='parallelized',
output_dtypes=[float, float])
with raises_regex(ValueError, 'output_sizes'):
apply_ufunc(identity, data_array, output_core_dims=[['z']],
output_dtypes=[float], dask='parallelized')
with raises_regex(ValueError, 'at least one input is an xarray object'):
apply_ufunc(identity, array, dask='parallelized')
with raises_regex(ValueError, 'consists of multiple chunks'):
apply_ufunc(identity, data_array, dask='parallelized',
output_dtypes=[float],
input_core_dims=[('y',)],
output_core_dims=[('y',)])
@requires_dask
def test_apply_dask_multiple_inputs():
import dask.array as da
def covariance(x, y):
return ((x - x.mean(axis=-1, keepdims=True))
* (y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)
rs = np.random.RandomState(42)
array1 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
array2 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
data_array_1 = xr.DataArray(array1, dims=('x', 'z'))
data_array_2 = xr.DataArray(array2, dims=('y', 'z'))
expected = apply_ufunc(
covariance, data_array_1.compute(), data_array_2.compute(),
input_core_dims=[['z'], ['z']])
allowed = apply_ufunc(
covariance, data_array_1, data_array_2, input_core_dims=[['z'], ['z']],
dask='allowed')
assert isinstance(allowed.data, da.Array)
xr.testing.assert_allclose(expected, allowed.compute())
parallelized = apply_ufunc(
covariance, data_array_1, data_array_2, input_core_dims=[['z'], ['z']],
dask='parallelized', output_dtypes=[float])
assert isinstance(parallelized.data, da.Array)
xr.testing.assert_allclose(expected, parallelized.compute())
@requires_dask
def test_apply_dask_new_output_dimension():
import dask.array as da
array = da.ones((2, 2), chunks=(1, 1))
data_array = xr.DataArray(array, dims=('x', 'y'))
def stack_negative(obj):
def func(x):
return np.stack([x, -x], axis=-1)
return apply_ufunc(func, obj, output_core_dims=[['sign']],
dask='parallelized', output_dtypes=[obj.dtype],
output_sizes={'sign': 2})
expected = stack_negative(data_array.compute())
actual = stack_negative(data_array)
assert actual.dims == ('x', 'y', 'sign')
assert actual.shape == (2, 2, 2)
assert isinstance(actual.data, da.Array)
assert_identical(expected, actual)
def pandas_median(x):
    """Median of *x*, computed through a pandas Series."""
    series = pd.Series(x)
    return series.median()
def test_vectorize():
    """apply_ufunc(vectorize=True) loops a core-dim-reducing function
    (pandas_median over 'y') across the remaining dimension."""
    # per the skip message, vectorize=True requires numpy >= 1.12
    if LooseVersion(np.__version__) < LooseVersion('1.12.0'):
        pytest.skip('numpy 1.12 or later to support vectorize=True.')
    data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=('x', 'y'))
    # medians of the two rows along 'y'
    expected = xr.DataArray([1, 2], dims=['x'])
    actual = apply_ufunc(pandas_median, data_array,
                         input_core_dims=[['y']],
                         vectorize=True)
    assert_identical(expected, actual)
@requires_dask
def test_vectorize_dask():
if LooseVersion(np.__version__) < LooseVersion('1.12.0'):
pytest.skip('numpy 1.12 or later to support vectorize=True.')
data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=('x', 'y'))
expected = xr.DataArray([1, 2], dims=['x'])
actual = apply_ufunc(pandas_median, data_array.chunk({'x': 1}),
input_core_dims=[['y']],
vectorize=True,
dask='parallelized',
output_dtypes=[float])
assert_identical(expected, actual)
def test_where():
cond = xr.DataArray([True, False], dims='x')
actual = xr.where(cond, 1, 0)
expected = xr.DataArray([1, 0], dims='x')
assert_identical(expected, actual)
| apache-2.0 |
DmitryYurov/BornAgain | Examples/python/fitting/ex01_BasicExamples/basic_fitting_tutorial.py | 2 | 4021 | """
Fitting example: 4 parameters fit for mixture of cylinders and prisms on top
of substrate.
"""
import bornagain as ba
from bornagain import deg, angstrom, nm
import numpy as np
from matplotlib import pyplot as plt
def get_sample(params):
    """
    Build a multilayer sample: an air layer containing an uncorrelated 50/50
    mixture of cylinders and prisms on top of a semi-infinite substrate.

    ``params`` is a dict with keys 'cylinder_height', 'cylinder_radius',
    'prism_height' and 'prism_base_edge'.
    """
    # Particle form factors are sized from the fit-parameter dictionary.
    cylinder_ff = ba.FormFactorCylinder(params["cylinder_radius"],
                                        params["cylinder_height"])
    prism_ff = ba.FormFactorPrism3(params["prism_base_edge"],
                                   params["prism_height"])

    # Materials: vacuum-like air, and weakly absorbing substrate/particles.
    m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0)
    m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8)
    m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)

    # Equal abundance of the two particle species.
    layout = ba.ParticleLayout()
    layout.addParticle(ba.Particle(m_particle, cylinder_ff), 0.5)
    layout.addParticle(ba.Particle(m_particle, prism_ff), 0.5)

    air_layer = ba.Layer(m_air)
    air_layer.addLayout(layout)

    multi_layer = ba.MultiLayer()
    multi_layer.addLayer(air_layer)
    multi_layer.addLayer(ba.Layer(m_substrate, 0))
    return multi_layer
def get_simulation(params):
    """
    Return a GISAXS simulation: 100x100 pixel detector covering
    [-1, 1] x [0, 2] degrees, 1 angstrom beam at 0.2 degree incidence.
    """
    sim = ba.GISASSimulation()
    sim.setDetectorParameters(100, -1.0*deg, 1.0*deg,
                              100, 0.0*deg, 2.0*deg)
    sim.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg)
    sim.setBeamIntensity(1e+08)
    sim.setSample(get_sample(params))
    return sim
def create_real_data():
    """
    Generate "experimental" data by running the simulation with known
    parameters, spoil it with noise, and save it to disk as a numpy text
    array for later use by load_real_data().
    """
    # default sample parameters
    params = {'cylinder_height': 5.0*nm, 'cylinder_radius': 5.0*nm,
              'prism_height': 5.0*nm, 'prism_base_edge': 5.0*nm}

    # retrieving simulated data in the form of numpy array
    simulation = get_simulation(params)
    simulation.runSimulation()
    real_data = simulation.result().array()

    # spoiling simulated data with noise to produce "real" data;
    # seed fixed so the generated file is reproducible
    np.random.seed(0)
    noise_factor = 0.1
    noisy = np.random.normal(real_data, noise_factor*np.sqrt(real_data))
    noisy[noisy < 0.1] = 0.1

    # BUG FIX: the original saved the noiseless `real_data`, silently
    # discarding the noise computed above. Save the noisy array instead.
    np.savetxt("basic_fitting_tutorial_data.txt.gz", noisy)
def load_real_data():
    """Load the "experimental" data previously written to disk."""
    data = np.loadtxt("basic_fitting_tutorial_data.txt.gz", dtype=float)
    return data
def run_fitting():
    """Build the fit objective, run the minimizer and report the result."""
    fit_objective = ba.FitObjective()
    fit_objective.addSimulationAndData(get_simulation, load_real_data())

    # Print and plot fit progress on every 10th iteration
    # (plotting slows the fit down).
    fit_objective.initPrint(10)
    fit_objective.initPlot(10)

    # Starting values deliberately differ from those used to generate the
    # "experimental" data; all parameters are bounded below at 0.01.
    params = ba.Parameters()
    for name, start in (("cylinder_height", 4.*nm),
                        ("cylinder_radius", 6.*nm),
                        ("prism_height", 4.*nm),
                        ("prism_base_edge", 6.*nm)):
        params.add(name, start, min=0.01)

    result = ba.Minimizer().minimize(fit_objective.evaluate, params)
    fit_objective.finalize(result)

    print("Fitting completed.")
    print("chi2:", result.minValue())
    for fit_par in result.parameters():
        print(fit_par.name(), fit_par.value, fit_par.error)

    # saving simulation image corresponding to the best fit parameters
    # np.savetxt("data.txt", fit_objective.simulationResult().array())
if __name__ == '__main__':
    # uncomment line below to regenerate "experimental" data file
    # create_real_data()
    run_fitting()
    plt.show()  # keep the fit-progress figure open after the fit finishes
| gpl-3.0 |
diegocavalca/Studies | phd-thesis/nilmtk/nilmtk/disaggregate/hart_85.py | 1 | 22673 | from __future__ import print_function, division
import pickle
from collections import OrderedDict, deque
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from nilmtk.feature_detectors.cluster import hart85_means_shift_cluster
from nilmtk.feature_detectors.steady_states import find_steady_states_transients
from nilmtk.disaggregate import Disaggregator
class MyDeque(deque):
    """A deque that can also remove an element at an arbitrary position."""

    def popmiddle(self, pos):
        """Remove and return the element at index ``pos``.

        Rotates the target element to the left end, pops it, then rotates
        back so the order of the remaining elements is unchanged.
        """
        self.rotate(-pos)
        item = self.popleft()
        self.rotate(pos)
        return item
class PairBuffer(object):
    """
    Bounded buffer of power transitions with ON/OFF pair matching
    (Hart 1985 two-state load monitor).

    Attributes:
    * transition_list (deque of [time, power..., matched-flag] lists)
    * matched_pairs (dataframe containing matched pairs of transitions)
    """

    def __init__(self, columns, buffer_size, min_tolerance, percent_tolerance,
                 large_transition, num_measurements):
        """
        Parameters
        ----------
        columns : list of measurement tuples, e.g. [('power', 'active')];
            only columns[0][1] is inspected, to label the pair columns
        buffer_size: int, optional
            size of the buffer to use for finding edges
        min_tolerance: int, optional
            variance in power draw allowed for pairing a match
        percent_tolerance: float, optional
            if transition is greater than large_transition, then use
            percent of large_transition
        large_transition: float, optional
            power draw of a Large transition
        num_measurements: int, optional
            2 if only active power
            3 if both active and reactive power
        """
        # We use a deque here, because it allows us quick access to start and
        # end popping and additionally, we can set a maxlen which drops oldest
        # items. This nicely suits Hart's recommendation that the size should
        # be tunable.
        self._buffer_size = buffer_size
        self._min_tol = min_tolerance
        self._percent_tol = percent_tolerance
        self._large_transition = large_transition
        self.transition_list = MyDeque([], maxlen=self._buffer_size)
        self._num_measurements = num_measurements
        if self._num_measurements == 3:
            # Both active and reactive power is available
            self.pair_columns = ['T1 Time', 'T1 Active', 'T1 Reactive',
                                 'T2 Time', 'T2 Active', 'T2 Reactive']
        elif self._num_measurements == 2:
            # Only active power is available
            if columns[0][1] == 'active':
                self.pair_columns = ['T1 Time', 'T1 Active',
                                     'T2 Time', 'T2 Active']
            elif columns[0][1] == 'apparent':
                self.pair_columns = ['T1 Time', 'T1 Apparent',
                                     'T2 Time', 'T2 Apparent']
        self.matched_pairs = pd.DataFrame(columns=self.pair_columns)

    def clean_buffer(self):
        """Drop every transition already marked as matched.

        Recurses after each removal because popmiddle() invalidates the
        enumeration indices.
        """
        # Remove any matched transactions
        for idx, entry in enumerate(self.transition_list):
            # entry[self._num_measurements] is the matched flag
            if entry[self._num_measurements]:
                self.transition_list.popmiddle(idx)
                self.clean_buffer()
                break
        # Remove oldest transaction if buffer cleaning didn't remove anything
        # if len(self.transitionList) == self._bufferSize:
        #     self.transitionList.popleft()

    def add_transition(self, transition):
        """Append one transition (time, power values) as an unmatched entry."""
        # Check transition is as expected.
        assert isinstance(transition, (tuple, list))
        # Check that we have both active and reactive powers.
        assert len(transition) == self._num_measurements
        # Convert as appropriate
        if isinstance(transition, tuple):
            mtransition = list(transition)
        # Add transition to List of transitions (set marker as unpaired)
        mtransition.append(False)
        self.transition_list.append(mtransition)
        # checking for pairs
        # self.pairTransitions()
        # self.cleanBuffer()

    def pair_transitions(self):
        """
        Match ON transitions with their corresponding OFF transitions.

        Hart 85, P 33.
        The algorithm must not allow an 0N transition to match an OFF which
        occurred at the end of a different cycle, so that only ON/OFF pairs
        which truly belong together are paired up. Otherwise the energy
        consumption of the appliance will be greatly overestimated.

        Hart 85, P 32.
        For the two-state load monitor, a pair is defined as two entries
        which meet the following four conditions:
        (1) They are on the same leg, or are both 240 V,
        (2) They are both unmarked,
        (3) The earlier has a positive real power component, and
        (4) When added together, they result in a vector in which the
            absolute value of the real power component is less than 35
            Watts (or 3.5% of the real power, if the transitions are
            over 1000 W) and the absolute value of the reactive power
            component is less than 35 VAR (or 3.5%).

        Returns
        -------
        bool : True if at least one new pair was matched.
        """
        tlength = len(self.transition_list)
        pairmatched = False
        if tlength < 2:
            return pairmatched
        # Can we reduce the running time of this algorithm?
        # My gut feeling is no, because we can't re-order the list...
        # I wonder if we sort but then check the time... maybe. TO DO
        # (perhaps!).
        new_matched_pairs = []
        # Start the element distance at 1, go up to current length of buffer
        for eDistance in range(1, tlength):
            idx = 0
            while idx < tlength - 1:
                # We don't want to go beyond length of array
                compindex = idx + eDistance
                if compindex < tlength:
                    val = self.transition_list[idx]
                    # val[1] is the active power and
                    # val[self._num_measurements] is match status
                    if (val[1] > 0) and (val[self._num_measurements] is False):
                        compval = self.transition_list[compindex]
                        if compval[self._num_measurements] is False:
                            # Add the two elements for comparison
                            vsum = np.add(
                                val[1:self._num_measurements],
                                compval[1:self._num_measurements])
                            # Set the allowable tolerance for reactive and
                            # active
                            matchtols = [self._min_tol, self._min_tol]
                            for ix in range(1, self._num_measurements):
                                # Absolute tolerance for small transitions,
                                # percentage tolerance for large ones.
                                matchtols[ix - 1] = (
                                    self._min_tol
                                    if (max(np.fabs([val[ix], compval[ix]])) < self._large_transition)
                                    else (self._percent_tol * max(np.fabs([val[ix], compval[ix]])))
                                )
                            if self._num_measurements == 3:
                                condition = (
                                    np.fabs(
                                        vsum[0]) < matchtols[0]) and (
                                    np.fabs(
                                        vsum[1]) < matchtols[1])
                            elif self._num_measurements == 2:
                                condition = np.fabs(vsum[0]) < matchtols[0]
                            if condition:
                                # Mark the transition as complete
                                self.transition_list[idx][self._num_measurements] = True
                                self.transition_list[compindex][self._num_measurements] = True
                                pairmatched = True
                                # Append the OFF transition to the ON. Add to
                                # the list.
                                matchedpair = val[0:self._num_measurements] + \
                                    compval[0:self._num_measurements]
                                new_matched_pairs.append(matchedpair)
                    # Iterate Index
                    idx += 1
                else:
                    break
        # Process new pairs in a single operation (faster than growing the
        # dataframe)
        if pairmatched:
            if self.matched_pairs.empty:
                self.matched_pairs = pd.DataFrame(
                    new_matched_pairs, columns=self.pair_columns)
            else:
                # NOTE(review): DataFrame.append is deprecated/removed in
                # modern pandas — confirm pandas version before upgrading.
                self.matched_pairs = self.matched_pairs.append(
                    pd.DataFrame(new_matched_pairs, columns=self.pair_columns))
        return pairmatched
class Hart85(Disaggregator):
    """1 or 2 dimensional Hart 1985 algorithm.

    Attributes
    ----------
    model : dict
        Each key is either the instance integer for an ElecMeter,
        or a tuple of instances for a MeterGroup.
        Each value is a sorted list of power in different states.
    """

    def __init__(self):
        # Learnt parameters; populated by train() or import_model().
        self.model = {}
        self.MODEL_NAME = "Hart85"
def train(
self,
metergroup,
columns=[
('power',
'active')],
buffer_size=20,
noise_level=70,
state_threshold=15,
min_tolerance=100,
percent_tolerance=0.035,
large_transition=1000,
**kwargs):
"""
Train using Hart85. Places the learnt model in `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
columns: nilmtk.Measurement, should be one of the following
[('power','active')]
[('power','apparent')]
[('power','reactive')]
[('power','active'), ('power', 'reactive')]
buffer_size: int, optional
size of the buffer to use for finding edges
min_tolerance: int, optional
variance in power draw allowed for pairing a match
percent_tolerance: float, optional
if transition is greater than large_transition,
then use percent of large_transition
large_transition: float, optional
power draw of a Large transition
"""
self.columns = columns
self.state_threshold = state_threshold
self.noise_level = noise_level
[self.steady_states, self.transients] = find_steady_states_transients(
metergroup, columns, noise_level, state_threshold, **kwargs)
self.pair_df = self.pair(
buffer_size, min_tolerance, percent_tolerance, large_transition)
self.centroids = hart85_means_shift_cluster(self.pair_df, columns)
self.model = dict(
columns=columns,
state_threshold=state_threshold,
noise_level=noise_level,
steady_states=self.steady_states,
transients=self.transients,
# pair_df=self.pair_df,
centroids=self.centroids
)
    def pair(self, buffer_size, min_tolerance, percent_tolerance,
             large_transition):
        """Feed the detected transients through a PairBuffer and return the
        dataframe of matched ON/OFF transition pairs."""
        subset = list(self.transients.itertuples())
        # +1 because itertuples() prepends the index (timestamp) column.
        buffer = PairBuffer(
            columns=self.columns,
            min_tolerance=min_tolerance,
            buffer_size=buffer_size,
            percent_tolerance=percent_tolerance,
            large_transition=large_transition,
            num_measurements=len(
                self.transients.columns) + 1)
        for s in subset:
            # if len(buffer.transitionList) < bsize
            if len(buffer.transition_list) == buffer_size:
                # Buffer full: evict already-matched entries before adding.
                buffer.clean_buffer()
            buffer.add_transition(s)
            buffer.pair_transitions()
        return buffer.matched_pairs
    def disaggregate_chunk(self, chunk, prev, transients):
        """
        Assign each transient in the chunk to its nearest learnt centroid
        and convert the resulting ON/OFF state matrix into per-appliance
        power estimates.

        Parameters
        ----------
        chunk : pd.DataFrame
            mains power
        prev : dict
            last known state of each appliance (-1 = unknown)
        transients : returned by find_steady_state_transients

        Returns
        -------
        (states, ndim) : pd.DataFrame with same index as `chunk`, and an
            int flag (2 for 1-d power data, 3 for 2-d).
        """
        # -1 marks "unknown state" for every appliance at every timestamp.
        states = pd.DataFrame(
            -1, index=chunk.index, columns=self.centroids.index.values)
        for transient_tuple in transients.itertuples():
            if transient_tuple[0] < chunk.index[0]:
                # Transient occurs before chunk has started; do nothing
                pass
            elif transient_tuple[0] > chunk.index[-1]:
                # Transient occurs after chunk has ended; do nothing
                pass
            else:
                # Absolute value of transient
                abs_value = np.abs(transient_tuple[1:])
                # Positive transition => an appliance turned ON.
                positive = transient_tuple[1] > 0
                abs_value_transient_minus_centroid = pd.DataFrame(
                    (self.centroids - abs_value).abs())
                if len(transient_tuple) == 2:
                    # 1d data
                    index_least_delta = (
                        abs_value_transient_minus_centroid.idxmin().values[0])
                else:
                    # 2d data.
                    # Need to find absolute value before computing minimum
                    columns = abs_value_transient_minus_centroid.columns
                    abs_value_transient_minus_centroid["multidim"] = (
                        abs_value_transient_minus_centroid[columns[0]] ** 2
                        +
                        abs_value_transient_minus_centroid[columns[1]] ** 2)
                    index_least_delta = (
                        abs_value_transient_minus_centroid["multidim"].idxmin())
                if positive:
                    # Turned on
                    states.loc[transient_tuple[0]][index_least_delta] = 1
                else:
                    # Turned off
                    states.loc[transient_tuple[0]][index_least_delta] = 0
        prev = states.iloc[-1].to_dict()
        power_chunk_dict = self.assign_power_from_states(states, prev)
        self.power_dict = power_chunk_dict
        self.chunk_index = chunk.index
        # Check whether 1d data or 2d data and converting dict to dataframe
        # NOTE(review): `transient_tuple` here is the last value from the loop
        # above — this assumes `transients` is non-empty; confirm upstream.
        if len(transient_tuple) == 2:
            temp_df = pd.DataFrame(power_chunk_dict, index=chunk.index)
            return temp_df, 2
        else:
            tuples = []
            for i in range(len(self.centroids.index.values)):
                for j in range(0, 2):
                    tuples.append([i, j])
            columns = pd.MultiIndex.from_tuples(tuples)
            temp_df = pd.DataFrame(
                power_chunk_dict,
                index=chunk.index,
                columns=columns)
            for i in range(len(chunk.index)):
                for j in range(len(self.centroids.index.values)):
                    for k in range(0, 2):
                        temp_df.iloc[i][j][k] = power_chunk_dict[j][i][k]
            return temp_df, 3
    def assign_power_from_states(self, states_chunk, prev):
        """
        Expand the sparse ON/OFF/unknown state matrix into a dense power
        series per appliance, holding the centroid power between an ON
        event and the following OFF event.

        Parameters
        ----------
        states_chunk : pd.DataFrame of 1 (on), 0 (off), -1 (unknown)
        prev : dict mapping appliance -> last known state before this chunk

        Returns
        -------
        dict : appliance -> np.ndarray of power values (1-d, or Nx2 when
            both active and reactive power were trained).
        """
        di = {}
        ndim = len(self.centroids.columns)
        for appliance in states_chunk.columns:
            values = states_chunk[[appliance]].values.flatten()
            if ndim == 1:
                power = np.zeros(len(values), dtype=int)
            else:
                power = np.zeros((len(values), 2), dtype=int)
            # NOTE(review): the `on` flag below is written but never read.
            i = 0
            # Loops stop at len(values) - 1, so the final sample keeps
            # whatever value the inner loop last assigned it.
            while i < len(values) - 1:
                if values[i] == 1:
                    # ON event: hold the centroid power until an OFF.
                    on = True
                    i = i + 1
                    power[i] = self.centroids.loc[appliance].values
                    while values[i] != 0 and i < len(values) - 1:
                        power[i] = self.centroids.loc[appliance].values
                        i = i + 1
                elif values[i] == 0:
                    # OFF event: hold zero power until the next ON.
                    on = False
                    i = i + 1
                    power[i] = 0
                    while values[i] != 1 and i < len(values) - 1:
                        if ndim == 1:
                            power[i] = 0
                        else:
                            power[i] = [0, 0]
                        i = i + 1
                else:
                    # Unknown state. If previously we know about this
                    # appliance's state, we can
                    # use that. Else, it defaults to 0
                    if prev[appliance] == -1 or prev[appliance] == 0:
                        on = False
                        power[i] = 0
                        while values[i] != 1 and i < len(values) - 1:
                            if ndim == 1:
                                power[i] = 0
                            else:
                                power[i] = [0, 0]
                            i = i + 1
                    else:
                        on = True
                        power[i] = self.centroids.loc[appliance].values
                        while values[i] != 0 and i < len(values) - 1:
                            power[i] = self.centroids.loc[appliance].values
                            i = i + 1
            di[appliance] = power
            # print(power.sum())
        return di
    def disaggregate(self, mains, output_datastore, **load_kwargs):
        """Disaggregate mains according to the model learnt previously.

        Parameters
        ----------
        mains : nilmtk.ElecMeter or nilmtk.MeterGroup
        output_datastore : instance of nilmtk.DataStore subclass
            For storing power predictions from disaggregation algorithm.
        sample_period : number, optional
            The desired sample period in seconds.
        **load_kwargs : key word arguments
            Passed to `mains.power_series(**kwargs)`
        """
        load_kwargs = self._pre_disaggregation_checks(load_kwargs)
        load_kwargs.setdefault('sample_period', 60)
        load_kwargs.setdefault('sections', mains.good_sections())
        timeframes = []
        building_path = '/building{}'.format(mains.building())
        # meter1 is conventionally reserved for the mains channel.
        mains_data_location = building_path + '/elec/meter1'
        data_is_available = False
        # Detect transients on the mains signal using the same settings
        # that were used during training.
        [_, transients] = find_steady_states_transients(
            mains, columns=self.columns, state_threshold=self.state_threshold,
            noise_level=self.noise_level, **load_kwargs)
        # For now ignoring the first transient
        # transients = transients[1:]
        # Initially all appliances/meters are in unknown state (denoted by -1)
        prev = OrderedDict()
        learnt_meters = self.centroids.index.values
        for meter in learnt_meters:
            prev[meter] = -1
        timeframes = []
        # Now iterating over mains data and disaggregating chunk by chunk
        if len(self.columns) == 1:
            ac_type = self.columns[0][1]
        else:
            ac_type = ['active', 'reactive']
        for chunk in mains.power_series(**load_kwargs):
            # Record metadata
            timeframes.append(chunk.timeframe)
            measurement = chunk.name
            power_df, dimen = self.disaggregate_chunk(
                chunk, prev, transients)
            if dimen == 2:
                columns = pd.MultiIndex.from_tuples([chunk.name])
            else:
                tuples = list(self.columns)
                columns = pd.MultiIndex.from_tuples(tuples)
            for meter in learnt_meters:
                data_is_available = True
                df = power_df[[meter]]
                df.columns = columns
                df.columns.names = ['physical_quantity', 'type']
                # meter + 2 so predictions start at meter2 (meter1 = mains).
                key = '{}/elec/meter{:d}'.format(building_path, meter + 2)
                val = df.apply(pd.to_numeric).astype('float32')
                output_datastore.append(key, value=val)
            print('Next Chunk..')
        print('Appending mains data to datastore')
        for chunk_mains in mains.load(ac_type=ac_type):
            chunk_df = chunk_mains
            chunk_df = chunk_df.apply(pd.to_numeric).astype('float32')
            print('Done')
            output_datastore.append(key=mains_data_location,
                                    value=chunk_df)
        # save metadata
        if data_is_available:
            self._save_metadata_for_disaggregation(
                output_datastore=output_datastore,
                sample_period=load_kwargs['sample_period'],
                measurement=measurement,
                timeframes=timeframes,
                building=mains.building(),
                supervised=False,
                num_meters=len(self.centroids)
            )
        return power_df
def export_model(self, filename):
example_dict = self.model
with open(filename, "wb") as pickle_out:
pickle.dump(example_dict, pickle_out)
def import_model(self, filename):
with open(filename, "rb") as pickle_in:
self.model = pickle.load(pickle_in)
self.columns = self.model['columns']
self.state_threshold = self.model['state_threshold']
self.noise_level = self.model['noise_level']
self.steady_states = self.model['steady_states']
self.transients = self.model['transients']
# pair_df=self.pair_df,
self.centroids = self.model['centroids']
    def best_matched_appliance(self, submeters, pred_df):
        """
        Match each disaggregated output column to the ground-truth submeter
        with the lowest RMSE.

        Parameters
        ----------
        submeters : elec.submeters object
        pred_df : predicted dataframe returned by disaggregate()

        Returns
        -------
        list : containing best matched pairs to disaggregated output
        """
        # NOTE(review): rms_error is never used in the visible code.
        rms_error = {}
        submeters_df = submeters.dataframe_of_meters()
        # Align predictions and ground truth on common timestamps.
        new_df = pd.merge(
            pred_df,
            submeters_df,
            left_index=True,
            right_index=True)
        rmse_all = []
        for pred_appliance in pred_df.columns:
            rmse = {}
            for appliance in submeters_df.columns:
                temp_value = (
                    np.sqrt(
                        mean_squared_error(
                            new_df[pred_appliance],
                            new_df[appliance])))
                rmse[appliance] = temp_value
            rmse_all.append(rmse)
        match = []
        for i in range(len(rmse_all)):
            key_min = min(rmse_all[i].keys(), key=(lambda k: rmse_all[i][k]))
            print('Best Matched Pair is', (i, key_min))
            # NOTE(review): as visible here, `match` is never populated and no
            # value is returned despite the documented return value — the tail
            # of this method appears truncated; confirm against upstream
            # nilmtk (expected: match.append((i, key_min)); return match).
| cc0-1.0 |
bnaul/scikit-learn | sklearn/conftest.py | 8 | 1292 | import os
import pytest
from threadpoolctl import threadpool_limits
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
@pytest.fixture(scope='function')
def pyplot():
    """Setup and teardown fixture for matplotlib.

    Skips the test when matplotlib cannot be imported; otherwise forces the
    non-interactive 'agg' backend and closes every figure after the test.

    Returns
    -------
    pyplot : module
        The ``matplotlib.pyplot`` module.
    """
    matplotlib = pytest.importorskip('matplotlib')
    matplotlib.use('agg')
    plt = pytest.importorskip('matplotlib.pyplot')
    yield plt
    plt.close('all')
def pytest_runtest_setup(item):
    """Cap OpenMP threads per xdist worker to prevent oversubscription.

    Parameters
    ----------
    item : pytest item
        item to be processed
    """
    worker_count = os.environ.get('PYTEST_XDIST_WORKER_COUNT')
    if worker_count is None:
        # variable absent when pytest-xdist is not installed
        return
    openmp_threads = _openmp_effective_n_threads()
    threads_per_worker = max(openmp_threads // int(worker_count), 1)
    threadpool_limits(threads_per_worker, user_api='openmp')
| bsd-3-clause |
shenzebang/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points: two Gaussian blobs shifted to (-2,-2)/(2,2)
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)

# get the separating hyperplane: w . x + b = 0 rewritten as y = a*x - b/w[1]
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]

# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])

# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')

# support vectors drawn as larger hollow circles
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)

plt.axis('tight')
plt.show()
| bsd-3-clause |
dpshelio/sunpy | sunpy/timeseries/sources/eve.py | 1 | 12232 | import os
import codecs
from os.path import basename
from datetime import datetime
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates
from pandas import DataFrame
from pandas.io.parsers import read_csv
import astropy.units as u
from astropy.time import TimeDelta
import sunpy.io
from sunpy.time import parse_time
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.util.metadata import MetaDict
from sunpy.visualization import peek_show
__all__ = ['EVESpWxTimeSeries', 'ESPTimeSeries']
class ESPTimeSeries(GenericTimeSeries):
    """
    SDO EVE/ESP Level 1 data.

    The Extreme ultraviolet Spectro-Photometer (ESP) is an irradiance
    instrument which is part of the Extreme ultraviolet Variability
    Experiment (EVE) onboard SDO. ESP provides high time cadence (0.25s)
    EUV irradiance measurements in five channels, one soft X-ray and 4 EUV.
    The first four orders of the diffraction grating gives measurements
    centered on 18nm, 26nm, 30nm and 36nm. The zeroth order (obtained by 4
    photodiodes) provides the soft X-ray measurements from 0.1-7nm.

    The ESP level 1 fits files are fully calibrated. The TimeSeries object
    created from an ESP fits file will contain 4 columns namely:

    * 'QD' - sum of 4 quad diodes, this is the soft X-ray measurements 0.1-7nm
    * 'CH_18' - EUV irradiance 18nm
    * 'CH_26' - EUV irradiance 26nm
    * 'CH_30' - EUV irradiance 30nm
    * 'CH_36' - EUV irradiance 36nm

    References
    ----------
    * `SDO Mission Homepage <https://sdo.gsfc.nasa.gov/>`__
    * `EVE Homepage <http://lasp.colorado.edu/home/eve/>`__
    * `README ESP data <http://lasp.colorado.edu/eve/data_access/evewebdata/products/level1/esp/EVE_ESP_L1_V6_README.pdf>`__
    * `ESP lvl1 data <http://lasp.colorado.edu/eve/data_access/evewebdata/misc/eve_calendars/calendar_level1_2018.html>`__
    * `ESP instrument paper <https://doi.org/10.1007/s11207-009-9485-8>`__

    Notes
    -----
    The 36nm channel demonstrates a significant noise and it is not
    recommended to be used for short-time observations of solar irradiance.
    """
    # Source key used by the TimeSeries factory to select this class.
    _source = 'esp'

    @peek_show
    def peek(self, title='EVE/ESP Level1', **kwargs):
        """Plot all five channels as stacked subplots sharing the time axis."""
        self._validate_data_for_ploting()
        names = ('Flux \n 0.1-7nm', 'Flux \n 18nm', 'Flux \n 26nm', 'Flux \n 30nm', 'Flux \n 36nm')
        figure = plt.figure()
        axes = plt.gca()
        axes = self.data.plot(ax=axes, subplots=True, sharex=True, **kwargs)
        plt.xlim(self.data.index[0], self.data.index[-1])
        axes[0].set_title(title)
        for i, ax in enumerate(axes):
            ax.set_ylabel(names[i])
            ax.legend(loc='upper right')
        # Time-of-day labels; date taken from the first sample.
        axes[-1].set_xlabel('Time (UT) ' + str(self.data.index[0])[0:11])
        axes[-1].xaxis.set_major_formatter(dates.DateFormatter('%H:%M'))
        plt.tight_layout()
        plt.subplots_adjust(hspace=0.05)
        return figure

    @classmethod
    def _parse_file(cls, filepath):
        """
        Parses a EVE ESP level 1 data.
        """
        hdus = sunpy.io.read_file(filepath)
        return cls._parse_hdus(hdus)

    @classmethod
    def _parse_hdus(cls, hdulist):
        """Build (data, header, units) from an opened FITS HDU list."""
        header = MetaDict(OrderedDict(hdulist[0].header))
        # Adding telescope to MetaData
        header.update({'TELESCOP': hdulist[1].header['TELESCOP'].split()[0]})
        start_time = parse_time(hdulist[1].header['T_OBS'])
        # 'SOD' column is seconds offset from the observation start time.
        times = start_time + TimeDelta(hdulist[1].data['SOD']*u.second)
        colnames = ['QD', 'CH_18', 'CH_26', 'CH_30', 'CH_36']
        all_data = [hdulist[1].data[x] for x in colnames]
        data = DataFrame(np.array(all_data).T, index=times.isot.astype('datetime64'), columns=colnames)
        data.sort_index(inplace=True)
        units = OrderedDict([('QD', u.W/u.m**2),
                             ('CH_18', u.W/u.m**2),
                             ('CH_26', u.W/u.m**2),
                             ('CH_30', u.W/u.m**2),
                             ('CH_36', u.W/u.m**2)])
        return data, header, units

    @classmethod
    def is_datasource_for(cls, **kwargs):
        """
        Determines if header corresponds to an EVE image.
        """
        if kwargs.get('source', ''):
            return kwargs.get('source', '').lower().startswith(cls._source)
        if 'meta' in kwargs.keys():
            return kwargs['meta'].get('TELESCOP', '').endswith('SDO/EVE')
class EVESpWxTimeSeries(GenericTimeSeries):
    """
    SDO EVE LightCurve for level 0CS data.

    The Extreme Ultraviolet Variability Experiment (EVE) is an instrument on
    board the Solar Dynamics Observatory (SDO). The EVE instrument is
    designed to measure the solar extreme ultraviolet (EUV) irradiance. The
    EUV radiation includes the 0.1-105 nm range, which provides the majority
    of the energy for heating Earth's thermosphere and creating Earth's
    ionosphere (charged plasma).

    EVE includes several irradiance instruments:

    * The Multiple EUV Grating Spectrographs (MEGS)-A is a grazing-incidence
      spectrograph that measures the solar EUV irradiance in the 5 to 37 nm
      range with 0.1-nm resolution,
    * The MEGS-B is a normal-incidence, dual-pass spectrograph that measures
      the solar EUV irradiance in the 35 to 105 nm range with 0.1-nm
      resolution.

    Level 0CS data is primarily used for space weather. It is provided
    near real-time and is crudely calibrated 1-minute averaged broadband
    irradiances from ESP and MEGS-P broadband. For other levels of EVE data,
    use `~sunpy.net.Fido`, with `sunpy.net.attrs.Instrument('eve')`.

    Data is available starting on 2010/03/01.

    Examples
    --------
    >>> import sunpy.timeseries
    >>> import sunpy.data.sample  # doctest: +REMOTE_DATA
    >>> eve = sunpy.timeseries.TimeSeries(sunpy.data.sample.EVE_TIMESERIES, source='EVE')  # doctest: +REMOTE_DATA
    >>> eve = sunpy.timeseries.TimeSeries("http://lasp.colorado.edu/eve/data_access/quicklook/quicklook_data/L0CS/LATEST_EVE_L0CS_DIODES_1m.txt", source='EVE')  # doctest: +REMOTE_DATA
    >>> eve.peek(subplots=True)  # doctest: +SKIP

    References
    ----------
    * `SDO Mission Homepage <https://sdo.gsfc.nasa.gov/>`__
    * `EVE Homepage <http://lasp.colorado.edu/home/eve/>`__
    * `Level 0CS Definition <http://lasp.colorado.edu/home/eve/data/>`__
    * `EVE Data Acess <http://lasp.colorado.edu/home/eve/data/data-access/>`__
    * `Instrument Paper <https://doi.org/10.1007/s11207-009-9487-6>`__
    """
    # Class attribute used to specify the source class of the TimeSeries.
    _source = 'eve'

    @peek_show
    def peek(self, column=None, **kwargs):
        """
        Plots the time series in a new figure.

        Parameters
        ----------
        column : `str`, optional
            The column to display. Defaults to `None`, so it will display all.
        **kwargs : `dict`
            Any additional plot arguments that should be used when plotting.
        """
        # Check we have a timeseries valid for plotting
        self._validate_data_for_ploting()
        figure = plt.figure()
        # Choose title if none was specified
        if "title" not in kwargs and column is None:
            if len(self.data.columns) > 1:
                kwargs['title'] = 'EVE (1 minute data)'
            else:
                if self._filename is not None:
                    # Derive a title from the source filename.
                    base = self._filename.replace('_', ' ')
                    kwargs['title'] = os.path.splitext(base)[0]
                else:
                    kwargs['title'] = 'EVE Averages'
        if column is None:
            self.plot(**kwargs)
        else:
            data = self.data[column]
            if "title" not in kwargs:
                kwargs['title'] = 'EVE ' + column.replace('_', ' ')
            data.plot(**kwargs)
        return figure

    @classmethod
    def _parse_file(cls, filepath):
        """
        Parses an EVE CSV file.
        """
        cls._filename = basename(filepath)
        with codecs.open(filepath, mode='rb', encoding='ascii') as fp:
            # Determine type of EVE CSV file and parse
            line1 = fp.readline()
            fp.seek(0)
            if line1.startswith("Date"):
                return cls._parse_average_csv(fp)
            elif line1.startswith(";"):
                return cls._parse_level_0cs(fp)

    @staticmethod
    def _parse_average_csv(fp):
        """
        Parses an EVE Averages file.
        """
        return "", read_csv(fp, sep=",", index_col=0, parse_dates=True)

    @staticmethod
    def _parse_level_0cs(fp):
        """
        Parses and EVE Level 0CS file.
        """
        is_missing_data = False  # boolean to check for missing data
        missing_data_val = np.nan
        header = []
        fields = []
        line = fp.readline()
        # Read header at top of file; header lines start with ';'
        while line.startswith(";"):
            header.append(line)
            if '; Missing data:' in line:
                is_missing_data = True
                missing_data_val = line.split(':')[1].strip()
            line = fp.readline()
        # First pass over the header: collect key/value metadata.
        meta = MetaDict()
        for hline in header:
            if hline == '; Format:\n' or hline == '; Column descriptions:\n':
                continue
            elif ('Created' in hline) or ('Source' in hline):
                meta[hline.split(':',
                                 1)[0].replace(';',
                                               ' ').strip()] = hline.split(':', 1)[1].strip()
            elif ':' in hline:
                meta[hline.split(':')[0].replace(';', ' ').strip()] = hline.split(':')[1].strip()
        # Second pass: column names live between "; Column descriptions:"
        # and "; Format:".
        fieldnames_start = False
        for hline in header:
            if hline.startswith("; Format:"):
                fieldnames_start = False
            if fieldnames_start:
                fields.append(hline.split(":")[0].replace(';', ' ').strip())
            if hline.startswith("; Column descriptions:"):
                fieldnames_start = True
        # Next line is YYYY DOY MM DD
        date_parts = line.split(" ")
        year = int(date_parts[0])
        month = int(date_parts[2])
        day = int(date_parts[3])

        def parser(x):
            # Parse date column (HHMM)
            return datetime(year, month, day, int(x[0:2]), int(x[2:4]))

        data = read_csv(fp, sep=r"\s+", names=fields,
                        index_col=0, date_parser=parser, header=None, engine='python')
        if is_missing_data:  # If missing data specified in header
            data[data == float(missing_data_val)] = np.nan

        # Add the units data
        units = OrderedDict([('XRS-B proxy', u.W/u.m**2),
                             ('XRS-A proxy', u.W/u.m**2),
                             ('SEM proxy', u.W/u.m**2),
                             ('0.1-7ESPquad', u.W/u.m**2),
                             ('17.1ESP', u.W/u.m**2),
                             ('25.7ESP', u.W/u.m**2),
                             ('30.4ESP', u.W/u.m**2),
                             ('36.6ESP', u.W/u.m**2),
                             ('darkESP', u.ct),
                             ('121.6MEGS-P', u.W/u.m**2),
                             ('darkMEGS-P', u.ct),
                             ('q0ESP', u.dimensionless_unscaled),
                             ('q1ESP', u.dimensionless_unscaled),
                             ('q2ESP', u.dimensionless_unscaled),
                             ('q3ESP', u.dimensionless_unscaled),
                             ('CMLat', u.deg),
                             ('CMLon', u.deg)])
        # Todo: check units used.
        return data, meta, units

    @classmethod
    def is_datasource_for(cls, **kwargs):
        """
        Determines if header corresponds to an EVE image.
        """
        if kwargs.get('source', ''):
            return kwargs.get('source', '').lower().startswith(cls._source)
| bsd-2-clause |
madcowswe/ODrive | analysis/Simulation/MotorSim.py | 1 | 7267 | # this file is for the simulation of a 3-phase synchronous motor
import numpy as np
import scipy as sp
import scipy.signal as signal
import scipy.integrate
import matplotlib.pyplot as plt
import time
def sign(num):
    """Return the sign of ``num``: +1 if positive, -1 if negative, else 0."""
    return (num > 0) - (num < 0)
# Butcher tableau for the explicit Runge-Kutta method used by rk_step below.
# NOTE(review): the coefficients appear to be the Dormand-Prince RK45 pair
# used by scipy.integrate (see the comment above rk_step) -- confirm against
# scipy's rk.py before editing any value.
C = np.array([0, 1/5, 3/10, 4/5, 8/9, 1])  # stage time fractions
A = np.array([
    [0, 0, 0, 0, 0],
    [1/5, 0, 0, 0, 0],
    [3/40, 9/40, 0, 0, 0],
    [44/45, -56/15, 32/9, 0, 0],
    [19372/6561, -25360/2187, 64448/6561, -212/729, 0],
    [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656]
])  # stage-combination coefficients (strictly lower triangular)
B = np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84])  # final weights
# rk_step from scipy.integrate rk.py
def rk_step(fun, t, y, f, h, A, B, C, K):
    """Advance an explicit Runge-Kutta method by a single step.

    Notation for the Butcher tableau is as in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Current value of the derivative, i.e. ``fun(t, y)``.
    h : float
        Step to use.
    A : ndarray, shape (n_stages, n_stages)
        Coefficients for combining previous RK stages to compute the next
        stage. For explicit methods the coefficients at and above the main
        diagonal are zeros.
    B : ndarray, shape (n_stages,)
        Coefficients for combining RK stages into the final prediction.
    C : ndarray, shape (n_stages,)
        Coefficients for incrementing time for consecutive RK stages.
        The value for the first stage is always zero.
    K : ndarray, shape (n_stages + 1, n)
        Storage array for the RK stages; stages are stored in rows. The
        last row ends up holding the derivative at ``t + h``.

    Returns
    -------
    y_new : ndarray, shape (n,)
        Solution at ``t + h``.
    f_new : ndarray, shape (n,)
        Derivative ``fun(t + h, y_new)``.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett, G. Wanner, "Solving Ordinary
           Differential Equations I: Nonstiff Problems", Sec. II.4.
    """
    K[0] = f
    # Each stage combines the previously computed stages (rows of K) with
    # the strictly-lower-triangular row of A for that stage.
    for stage in range(1, len(C)):
        increment = h * np.dot(K[:stage].T, A[stage, :stage])
        K[stage] = fun(t + C[stage] * h, y + increment)

    y_new = y + h * np.dot(K[:-1].T, B)
    f_new = fun(t + h, y_new)

    K[-1] = f_new
    return y_new, f_new
# example params for d5065 motor
# phase_R = 0.039 Ohms
# phase_L = 0.0000157 H
# pole_pairs = 7
# KV = 270
# J = 1e-4
# b_coulomb = 0.001
# b_viscous = 0.001
class motor_pmsm_mechanical:
    """Mechanical (rotational) dynamics of a PMSM rotor.

    State is ``[theta, theta_dot]`` (angle and angular velocity); friction
    is modelled as a viscous term plus a Coulomb term.
    """

    def __init__(self, J, b_coulomb, b_viscous):
        self.J = J            # moment of inertia
        self.b_c = b_coulomb  # Coulomb friction coefficient
        self.b_v = b_viscous  # viscous friction coefficient

    def diff_eqs(self, t, y, torque):
        """Return d/dt of ``y = [theta, theta_dot]`` under ``torque``."""
        omega = y[1]
        alpha = (1/self.J) * (torque - self.b_v * omega - self.b_c * sign(omega))
        return np.array([omega, alpha])
def inverter(vbus, timings, current):
    """Placeholder: map bus voltage + switch timings to d-q voltages.

    Not implemented yet.
    """
    # this function should take the relevant inputs and output voltages in dq reference frame.
    pass
class motor:
    """Electromechanical PMSM model integrated with a single RK step per dT.

    State is ``[theta, theta_dot, I_d, I_q]``: mechanical angle and angular
    velocity plus the d-/q-axis currents in the rotor reference frame.
    """
    def __init__(self, J, b_coulomb, b_viscous, R, L_q, L_d, KV, pole_pairs, dT):
        """Store motor parameters and zero the state.

        J: rotor inertia.  b_coulomb / b_viscous: friction coefficients.
        R: phase resistance.  L_q / L_d: q-/d-axis inductances.
        KV: motor velocity constant.  pole_pairs: magnetic pole pairs.
        dT: integration timestep in seconds.
        """
        self.dT = dT
        self.b_coulomb = b_coulomb
        self.b_viscous = b_viscous
        self.KV = KV
        self.pole_pairs = pole_pairs
        # NOTE(review): 8.27/KV is presumably the torque constant Kt [Nm/A]
        # derived from KV [rpm/V] -- confirm the unit convention.
        kt = 8.27/KV
        self.lambda_m = 2*kt/(3*pole_pairs)  # speed constant in Vs/rad (electrical rad)
        self.R = R
        self.L_q = L_q
        self.L_d = L_d
        self.J = J
        # state variables for motor
        self.theta = 0  # mechanical!
        self.theta_dot = 0  # mechanical!
        self.I_d = 0
        self.I_q = 0
        # Stage storage handed to rk_step: n_stages + 1 = 7 rows, 4 states.
        self.K = np.empty((7, 4))
    def simulate(self, t, u, x0):
        """Run a constant-input simulation over the timesteps in ``t``.

        t: iterable of timesteps (only its length is used; output times are
           reconstructed as ``i * dT``).
        u: constant input as ``[T_load, V_d, V_q]``.
        x0: initial state ``[theta, theta_dot, I_d, I_q]``.

        Returns ``[time, pos, vel, I_d, I_q]`` as parallel lists.
        """
        (self.theta, self.theta_dot, self.I_d, self.I_q) = x0
        time = []
        pos = []
        vel = []
        I_d = []
        I_q = []
        for i in range(len(t)):
            # u is [T_load, V_d, V_q]; single_step_rk takes (V_q, V_d, T_load).
            self.single_step_rk(u[2], u[1], u[0])
            time.append(i*self.dT)
            pos.append(self.theta)
            vel.append(self.theta_dot)
            I_d.append(self.I_d)
            I_q.append(self.I_q)
        return [time, pos, vel, I_d, I_q]
    def inputs(self, V_q, V_d, T_load):
        """Latch the inputs read by ``diff_eqs``."""
        self.V_q = V_q
        self.V_d = V_d
        self.T_load = T_load
    def diff_eqs(self, t, y):
        """Return d/dt of the state ``y = [theta, theta_dot, I_d, I_q]``.

        Inputs are ``self.V_q``, ``self.V_d`` and ``self.T_load``;
        ``inputs()`` must be called before this if they have changed.
        """
        theta = y[0]
        theta_dot = y[1]
        I_d = y[2]
        I_q = y[3]
        torque = 3*self.pole_pairs/2 * (self.lambda_m * I_q + (self.L_d - self.L_q)*I_d*I_q) - self.T_load
        # Stiction: at standstill, net torque below the Coulomb level
        # produces no acceleration.
        if theta_dot == 0 and -1*self.b_coulomb < torque < self.b_coulomb:
            torque = 0
        # theta_dot = theta_dot, no ode here
        theta_ddot = (1/self.J) * (torque - self.b_viscous * theta_dot - self.b_coulomb * sign(theta_dot))
        I_d_dot = self.V_d / self.L_d - self.R / self.L_d * I_d + theta_dot*self.pole_pairs * self.L_q / self.L_d * I_q
        I_q_dot = self.V_q / self.L_q - self.R / self.L_q * I_q - theta_dot*self.pole_pairs * self.L_d / self.L_q * I_d - theta_dot*self.pole_pairs * self.lambda_m / self.L_q
        return np.array([theta_dot, theta_ddot, I_d_dot, I_q_dot])
    def single_step_rk(self, V_q, V_d, T_load):
        """Advance *this* motor's state by one ``dT`` with a single RK step.

        BUG FIX: this method previously operated on the module-level
        ``d5065`` instance instead of ``self``, so it silently only worked
        for that one global object.
        """
        self.inputs(V_q, V_d, T_load)
        x = (self.theta, self.theta_dot, self.I_d, self.I_q)
        ((self.theta, self.theta_dot, self.I_d, self.I_q), _) = rk_step(
            self.diff_eqs, 0, x, self.diff_eqs(0, x), self.dT, A, B, C, self.K)
if __name__ == "__main__":
    # Open-loop simulation of a d5065 motor: 1 V on the q axis, no load.
    d5065 = motor(J = 1e-4, b_coulomb = 0, b_viscous = 0.01, R = 0.039, L_q = 1.57e-5, L_d = 1.57e-5, KV = 270, pole_pairs = 7, dT = 1/48000)
    x0 = [0, 0, 0, 0]  # initial state of theta, theta_dot, I_d, I_q
    u = [0, 0, 1]  # input for simulation as [T_load, V_d, V_q]
    # NOTE: 12000 samples at Fs=48kHz is 0.25 s (the original comment
    # incorrectly said half a second).
    t = [i*1/48000 for i in range(12000)]
    data = d5065.simulate(t=t, u=u, x0=x0)
    # Unpack the results.  The dead `states`/`dT` locals and the redundant
    # empty-list initialisations that were immediately overwritten have
    # been removed.
    pos = data[1]
    vel = data[2]
    I_d = data[3]
    I_q = data[4]
    fig, axs = plt.subplots(4)
    # NOTE(review): theta/omega in the model are mechanical, but the axis
    # labels say eRad -- confirm which is intended.
    axs[0].plot(t, pos)
    axs[0].set_title('pos')
    axs[0].set_ylabel('Theta (eRad)')
    axs[1].plot(t, vel)
    axs[1].set_title('vel')
    axs[1].set_ylabel('Omega (eRad/s)')
    axs[2].plot(t, I_d)
    axs[2].set_title('I_d')
    axs[2].set_ylabel('Current (A)')
    axs[3].plot(t, I_q)
    axs[3].set_title('I_q')
    axs[3].set_ylabel('Current (A)')
    axs[3].set_xlabel('time (s)')
plt.show() | mit |
NelisVerhoef/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
# Public names re-exported for use by scikit-learn's test modules.
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
           "assert_raises_regexp", "raises", "with_setup", "assert_true",
           "assert_false", "assert_almost_equal", "assert_array_equal",
           "assert_array_almost_equal", "assert_array_less",
           "assert_less", "assert_less_equal",
           "assert_greater", "assert_greater_equal"]
# Compatibility shims: prefer nose's implementations when available,
# otherwise provide minimal pure-Python fallbacks.
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0
    def assert_in(x, container):
        """Fallback for nose's ``assert_in``."""
        assert_true(x in container, msg="%r in %r" % (x, container))
    def assert_not_in(x, container):
        """Fallback for nose's ``assert_not_in``."""
        assert_false(x in container, msg="%r in %r" % (x, container))
try:
    from nose.tools import assert_raises_regex
except ImportError:
    # for Python 2
    def assert_raises_regex(expected_exception, expected_regexp,
                            callable_obj=None, *args, **kwargs):
        """Helper function to check for message patterns in exceptions"""
        not_raised = False
        try:
            callable_obj(*args, **kwargs)
            not_raised = True
        except expected_exception as e:
            error_message = str(e)
            if not re.compile(expected_regexp).search(error_message):
                raise AssertionError("Error message should match pattern "
                                     "%r. %r does not." %
                                     (expected_regexp, error_message))
        if not_raised:
            raise AssertionError("%s not raised by %s" %
                                 (expected_exception.__name__,
                                  callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.
    func : callable
        Callable object to trigger warnings.
    *args : the positional arguments to `func`.
    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        # NOTE(review): this requires an *exact* category match (``is``),
        # unlike assert_warns_message below which accepts subclasses --
        # confirm whether the asymmetry is intentional.
        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    # very important to avoid uncontrolled state propagation
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.
    message : str | callable
        The entire message or a substring to test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.
    func : callable
        Callable object to trigger warnings.
    *args : the positional arguments to `func`.
    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        # Subclasses of warning_class are accepted here.
        found = [issubclass(warning.category, warning_class) for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))
        message_found = False
        # Checks the message of all warnings belonging to warning_class.
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg
            if check_in_message(msg):
                message_found = True
                break
        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'"
                                 % (message, func.__name__, msg))
    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Call ``func(*args, **kw)`` and fail if any warning is raised.

    numpy's VisibleDeprecationWarning (numpy >= 1.9) is ignored.

    Returns
    -------
    result : the return value of `func`
    """
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]
        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(obj=None):
    """ Context manager and decorator to ignore warnings

    Note. Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')
    >>> def nasty_warn():
    ...    warnings.warn('buhuhuhu')
    ...    print(42)
    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Dual-use: called with a callable -> act as a decorator; called with
    # no argument -> act as a context manager.
    if callable(obj):
        return _ignore_warnings(obj)
    else:
        return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager

    Copied from Python 2.7.5 and modified as required.
    Records warnings raised inside the ``with`` block and discards them on
    exit, restoring the previous filter/showwarning state.
    """

    def __init__(self):
        """Initialize the manager (always records; takes no parameters)."""
        self._record = True
        self._module = sys.modules['warnings']
        self._entered = False
        self.log = []

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter('always')
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the filters and showwarning hook so __exit__ can restore them.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            self.log = []

            def showwarning(*args, **kwargs):
                self.log.append(warnings.WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return self.log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
# Prefer nose's implementations when available; otherwise use the local
# pure-Python equivalents defined above.
try:
    from nose.tools import assert_less
except ImportError:
    assert_less = _assert_less
try:
    from nose.tools import assert_greater
except ImportError:
    assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Helper function to test error messages in exceptions.

    Parameters
    ----------
    exceptions : exception or tuple of exception
        Exception type(s) that ``function`` is expected to raise.
    message : str
        Substring that must appear in the raised exception's message.
    function : callable
        Callable object expected to raise.
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`.
    """
    try:
        function(*args, **kwargs)
    except exceptions as e:
        observed = str(e)
        if message not in observed:
            raise AssertionError("Error message does not include the expected"
                                 " string: %r. Observed error message: %r" %
                                 (message, observed))
    else:
        # Nothing was raised: report the expected exception name(s).
        exc_tuple = exceptions if isinstance(exceptions, tuple) else (exceptions,)
        names = " or ".join(exc.__name__ for exc in exc_tuple)
        raise AssertionError("%s not raised by %s" %
                             (names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)

    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    # BUG FIX: use numpy directly -- the top-level `scipy.empty` alias of
    # numpy's function is deprecated and removed in modern SciPy releases.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.

        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).

        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a BytesIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets
    def __call__(self, urlname):
        # The dataset name is the last component of the URL path.
        dataset_name = urlname.split('/')[-1]
        if dataset_name in self.mock_datasets:
            resource_name = '_' + dataset_name
            from io import BytesIO
            matfile = BytesIO()
            dataset = self.mock_datasets[dataset_name]
            ordering = None
            if isinstance(dataset, tuple):
                dataset, ordering = dataset
            fake_mldata(dataset, resource_name, matfile, ordering)
            # Rewind so callers can read the fake response from the start.
            matfile.seek(0)
            return matfile
        else:
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
def install_mldata_mock(mock_datasets):
    """Monkey-patch ``sklearn.datasets.mldata.urlopen`` with a mock.

    ``mock_datasets`` has the format documented on ``mock_mldata_urlopen``.
    """
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    """Restore the real ``urlopen`` in ``sklearn.datasets.mldata``."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
         "RandomizedSearchCV"]
# some strange ones (label transformers, vectorizers, internal helpers)
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder',
             'MultiLabelBinarizer', 'TfidfTransformer',
             'TfidfVectorizer', 'IsotonicRegression',
             'OneHotEncoder', 'RandomTreesEmbedding',
             'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
             'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash', 'HashingVectorizer',
             'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
             # GradientBoosting base estimators, maybe should
             # exclude them in another way
             'ZeroEstimator', 'ScaledLogOddsEstimator',
             'QuantileEstimator', 'MeanEstimator',
             'LogOddsEstimator', 'PriorProbabilityEstimator',
             '_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
                   include_other=False, type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV

    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.

    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class is abstract iff it still has unimplemented abstractmethods.
        return bool(getattr(c, '__abstractmethods__', False))

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if ".tests." in modname:
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    # Idiomatic membership tests (`x not in y` instead of `not x in y`).
    if not include_dont_test:
        estimators = [c for c in estimators if c[0] not in DONT_TEST]

    if not include_other:
        estimators = [c for c in estimators if c[0] not in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if c[0] not in META_ESTIMATORS]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            # Anything left in type_filter was not a recognized kind.
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
    """Set ``estimator``'s ``random_state`` parameter if it has one.

    Estimators without a ``random_state`` parameter are left untouched.
    """
    # Membership test directly on the params dict (no redundant .keys()).
    if "random_state" in estimator.get_params():
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips test if matplotlib not installed. """
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            # Probe matplotlib up front: import it, force the Agg backend
            # and open a figure; any failure means the test is skipped.
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # this fails if no $DISPLAY specified
            import matplotlib.pyplot as plt
            plt.figure()
        except ImportError:
            raise SkipTest('Matplotlib not available.')
        else:
            return func(*args, **kwargs)
    return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
                  message='Multi-process bug in Mac OS X >= 10.7 '
                          '(see issue #636)'):
    """Test decorator that skips test if OS is Mac OS X and its
    major version is one of ``versions``.

    Deprecated; use ``if_safe_multiprocessing_with_blas`` instead.
    """
    warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
                  " in 0.19: use the safer and more generic"
                  " if_safe_multiprocessing_with_blas instead",
                  DeprecationWarning)
    mac_version, _, _ = platform.mac_ver()
    # Compare only the major.minor components of the reported version.
    skip = '.'.join(mac_version.split('.')[:2]) in versions
    def decorator(func):
        if skip:
            @wraps(func)
            def func(*args, **kwargs):
                raise SkipTest(message)
        return func
    return decorator
def if_safe_multiprocessing_with_blas(func):
    """Decorator for tests involving both BLAS calls and multiprocessing

    Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
    conjunction with some implementation of BLAS (or other libraries that
    manage an internal posix thread pool) can cause a crash or a freeze of the
    Python process.

    Under Python 3.4 and later, joblib uses the forkserver mode of
    multiprocessing which does not trigger this problem.

    In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.

    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OSX.
    """
    @wraps(func)
    def run_test(*args, **kwargs):
        if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
            raise SkipTest(
                "Possible multi-process bug with some BLAS under Python < 3.4")
        return func(*args, **kwargs)
    return run_test
def clean_warning_registry():
    """Safe way to reset warnings.

    Resets the warnings filters and empties the ``__warningregistry__``
    dict of every loaded module (``six.moves`` entries are skipped).
    """
    warnings.resetwarnings()
    attr_name = "__warningregistry__"
    for name, module in list(sys.modules.items()):
        if 'six.moves' in name:
            continue
        if hasattr(module, attr_name):
            getattr(module, attr_name).clear()
def check_skip_network():
    """Skip test when SKLEARN_SKIP_NETWORK_TESTS is set to a non-zero value."""
    if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Skip test if being run on Travis."""
    if os.environ.get('TRAVIS') == "true":
        raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
    """Context manager yielding ``data`` reloaded via joblib memmapping.

    On entry the data is dumped into a fresh temporary folder and loaded
    back with the requested ``mmap_mode``; on exit the folder is removed.
    """
    def __init__(self, data, mmap_mode='r'):
        self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
        self.mmap_mode = mmap_mode
        self.data = data
    def __enter__(self):
        fpath = op.join(self.temp_folder, 'data.pkl')
        joblib.dump(self.data, fpath)
        data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
        # Fallback cleanup in case __exit__ is never reached.
        atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
        return data_read_only
    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
# Decorators that skip the wrapped test when the check raises SkipTest.
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
AlexanderFabisch/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics

# The digits dataset
digits = datasets.load_digits()

# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
    plt.subplot(2, 4, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Training: %i' % label)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)

# We learn the digits on the first half of the digits.
# BUG FIX: use floor division -- on Python 3, `n_samples / 2` is a float
# and cannot be used as a slice index (TypeError).
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])

# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])

print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))

images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    plt.subplot(2, 4, index + 5)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' % prediction)

plt.show()
| bsd-3-clause |
heli522/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)


def f(x):
    """The target function to predict: ``x * sin(x)``."""
    return np.sin(x) * x
#----------------------------------------------------------------------
#  First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)

# Observations, with heteroscedastic noise (per-sample std dev in dy).
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)

# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)

alpha = 0.95

clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)

clf.fit(X, y)

# Make the prediction on the meshed x-axis (0.95 quantile -> upper bound)
y_upper = clf.predict(xx)

clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)

# Make the prediction on the meshed x-axis (0.05 quantile -> lower bound)
y_lower = clf.predict(xx)

clf.set_params(loss='ls')
clf.fit(X, y)

# Make the prediction on the meshed x-axis (least-squares fit of the mean)
y_pred = clf.predict(xx)

# Plot the function, the mean prediction and the 90% prediction interval
# spanned by the 0.05 and 0.95 quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
dungvtdev/upsbayescpm | pgmpy/estimators/BdeuScore.py | 6 | 3421 | #!/usr/bin/env python
from math import lgamma
from pgmpy.estimators import StructureScore
class BdeuScore(StructureScore):
    def __init__(self, data, equivalent_sample_size=10, **kwargs):
        """
        Class for Bayesian structure scoring for BayesianModels with Dirichlet priors.
        The BDeu score is the result of setting all Dirichlet hyperparameters/pseudo_counts to
        `equivalent_sample_size/variable_cardinality`.
        The `score`-method measures how well a model is able to describe the given data set.

        Parameters
        ----------
        data: pandas DataFrame object
            dataframe object where each column represents one variable.
            (If some values in the data are missing the data cells should be set to `numpy.NaN`.
            Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
        equivalent_sample_size: int (default: 10)
            The equivalent/imaginary sample size (of uniform pseudo samples) for the dirichlet
            hyperparameters. The score is sensitive to this value, runs with different values
            might be useful.
        state_names: dict (optional)
            A dict indicating, for each variable, the discrete set of states (or values)
            that the variable can take. If unspecified, the observed values in the data set
            are taken to be the only possible states.
        complete_samples_only: bool (optional, default `True`)
            Specifies how to deal with missing data, if present. If set to `True` all rows
            that contain `np.NaN` somewhere are ignored. If `False` then, for each variable,
            every row where neither the variable nor its parents are `np.NaN` is used.
            This sets the behavior of the `state_count`-method.

        References
        ----------
        [1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
            Section 18.3.4-18.3.6 (esp. page 806)
        [2] AM Carvalho, Scoring functions for learning Bayesian networks,
            http://www.lx.it.pt/~asmc/pub/talks/09-TA/ta_pres.pdf
        """
        self.equivalent_sample_size = equivalent_sample_size
        super(BdeuScore, self).__init__(data, **kwargs)

    def local_score(self, variable, parents):
        """Compute a score that measures how much `variable` is "influenced"
        by the candidate parent set `parents`: the BDeu family score, a sum
        of log-Gamma terms over the observed state counts."""
        var_states = self.state_names[variable]
        var_cardinality = len(var_states)
        # Frequency table: one row per state of `variable`, one column per
        # joint configuration of `parents` (a single column if no parents).
        state_counts = self.state_counts(variable, parents)
        num_parents_states = float(len(state_counts.columns))
        score = 0
        for parents_state in state_counts:  # iterate over df columns (only 1 if no parents)
            conditional_sample_size = sum(state_counts[parents_state])
            # Pseudo count per parent configuration: the equivalent sample
            # size is spread uniformly over all parent configurations.
            score += (lgamma(self.equivalent_sample_size / num_parents_states) -
                      lgamma(conditional_sample_size + self.equivalent_sample_size / num_parents_states))
            for state in var_states:
                if state_counts[parents_state][state] > 0:
                    # Pseudo count per (parent configuration, state) cell.
                    score += (lgamma(state_counts[parents_state][state] +
                                     self.equivalent_sample_size / (num_parents_states * var_cardinality)) -
                              lgamma(self.equivalent_sample_size / (num_parents_states * var_cardinality)))
        return score
| mit |
abhishekkrthakur/scikit-learn | sklearn/cluster/tests/test_birch.py | 5 | 5631 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
    """The sample counts stored at the root and at the leaves must both
    add up to the size of the training set."""
    X, y = make_blobs(n_samples=10)
    model = Birch()
    model.fit(X)
    root_total = sum(sc.n_samples_ for sc in model.root_.subclusters_)
    leaf_total = sum(sc.n_samples_
                     for leaf in model._get_leaves()
                     for sc in leaf.subclusters_)
    assert_equal(leaf_total, X.shape[0])
    assert_equal(root_total, X.shape[0])
def test_partial_fit():
    """Two partial_fit calls over halves of X should match a single fit."""
    X, y = make_blobs(n_samples=100)
    full = Birch(n_clusters=3)
    full.fit(X)
    incremental = Birch(n_clusters=None)
    for half in (X[:50], X[50:]):
        incremental.partial_fit(half)
    assert_array_equal(incremental.subcluster_centers_,
                       full.subcluster_centers_)
    # partial_fit(None) runs only the global clustering step; with
    # n_clusters now set it must reproduce the labels of the single fit.
    incremental.set_params(n_clusters=3)
    incremental.partial_fit(None)
    assert_array_equal(incremental.subcluster_labels_, full.subcluster_labels_)
def test_birch_predict():
    """predict() must assign each sample to its nearest subcluster centroid."""
    rng = np.random.RandomState(0)
    X = generate_clustered_data(n_clusters=3, n_features=3,
                                n_samples_per_cluster=10)
    # Shuffle the 3 * 10 samples so cluster order carries no information.
    order = np.arange(30)
    rng.shuffle(order)
    X_shuffle = X[order, :]
    model = Birch(n_clusters=4, threshold=1.)
    model.fit(X_shuffle)
    assert_array_equal(model.labels_, model.predict(X_shuffle))
    closest = pairwise_distances_argmin(X_shuffle, model.subcluster_centers_)
    assert_almost_equal(v_measure_score(closest, model.labels_), 1.0)
def test_n_clusters():
    """Exercise the accepted values of the n_clusters parameter."""
    X, y = make_blobs(n_samples=100, centers=10)
    # An integer triggers the default agglomerative global clustering step.
    with_int = Birch(n_clusters=10)
    with_int.fit(X)
    assert_greater(len(with_int.subcluster_centers_), 10)
    assert_equal(len(np.unique(with_int.labels_)), 10)
    # Passing an equivalent clusterer instance must give identical results.
    agg = AgglomerativeClustering(n_clusters=10)
    with_estimator = Birch(n_clusters=agg)
    with_estimator.fit(X)
    assert_array_equal(with_int.subcluster_labels_,
                       with_estimator.subcluster_labels_)
    assert_array_equal(with_int.labels_, with_estimator.labels_)
    # A non-clusterer estimator as the global step is rejected.
    bad = Birch(n_clusters=ElasticNet())
    assert_raises(ValueError, bad.fit, X)
    # A threshold so large that very few subclusters remain warns the user.
    coarse = Birch(threshold=10000.)
    assert_warns(UserWarning, coarse.fit, X)
def test_sparse_X():
    """Fitting on a CSR matrix must match fitting on the dense array."""
    X, y = make_blobs(n_samples=100, centers=10)
    dense_model = Birch(n_clusters=10)
    dense_model.fit(X)
    sparse_model = Birch(n_clusters=10)
    sparse_model.fit(sparse.csr_matrix(X))
    assert_array_equal(dense_model.labels_, sparse_model.labels_)
    assert_array_equal(dense_model.subcluster_centers_,
                       sparse_model.subcluster_centers_)
def check_branching_factor(node, branching_factor):
    """Recursively verify that no node holds more than ``branching_factor``
    subclusters."""
    assert_greater_equal(branching_factor, len(node.subclusters_))
    for subcluster in node.subclusters_:
        child = subcluster.child_
        if child:
            check_branching_factor(child, branching_factor)
def test_branching_factor():
    """No CF-tree node may hold more than branching_factor subclusters."""
    X, y = make_blobs()
    factor = 9
    # A tiny threshold maximizes the number of subclusters created.
    for n_clusters in (None, 3):
        model = Birch(n_clusters=n_clusters, branching_factor=factor,
                      threshold=0.01)
        model.fit(X)
        check_branching_factor(model.root_, factor)
    # A branching factor of one cannot split nodes and must be rejected.
    degenerate = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
    assert_raises(ValueError, degenerate.fit, X)
def check_threshold(birch_instance, threshold):
    """Walk the linked list of leaves and check that every subcluster
    radius stays within ``threshold``."""
    leaf = birch_instance.dummy_leaf_.next_leaf_
    while leaf:
        for subcluster in leaf.subclusters_:
            assert_greater_equal(threshold, subcluster.radius)
        leaf = leaf.next_leaf_
def test_threshold():
    """Leaf subcluster radii must stay below the configured threshold."""
    X, y = make_blobs(n_samples=80, centers=4)
    for limit in (0.5, 5.0):
        model = Birch(threshold=limit, n_clusters=None)
        model.fit(X)
        check_threshold(model, limit)
| bsd-3-clause |
maropu/spark | python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py | 7 | 9065 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
import datetime
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ComplexOpsTest(PandasOnSparkTestCase, TestCasesUtils):
    """Tests for arithmetic/logical operators on pandas-on-Spark Series of
    array (list) values: element concatenation via ``+`` is the only
    supported binary op; everything else must raise ``TypeError``."""

    # --- fixtures: pandas Series (psers) and their pandas-on-Spark
    # --- counterparts (pssers), split into numeric and non-numeric arrays.
    @property
    def numeric_array_psers(self):
        return [
            pd.Series([[1, 2, 3]]),
            pd.Series([[0.1, 0.2, 0.3]]),
            pd.Series([[decimal.Decimal(1), decimal.Decimal(2), decimal.Decimal(3)]]),
        ]
    @property
    def non_numeric_array_psers(self):
        return {
            "string": pd.Series([["x", "y", "z"]]),
            "date": pd.Series(
                [[datetime.date(1994, 1, 1), datetime.date(1994, 1, 2), datetime.date(1994, 1, 3)]]
            ),
            "bool": pd.Series([[True, True, False]]),
        }
    @property
    def numeric_array_pssers(self):
        # NOTE: properties rebuild these on every access.
        return [ps.from_pandas(pser) for pser in self.numeric_array_psers]
    @property
    def non_numeric_array_pssers(self):
        pssers = {}
        for k, v in self.non_numeric_array_psers.items():
            pssers[k] = ps.from_pandas(v)
        return pssers
    @property
    def psers(self):
        return self.numeric_array_psers + list(self.non_numeric_array_psers.values())
    @property
    def pssers(self):
        return self.numeric_array_pssers + list(self.non_numeric_array_pssers.values())
    @property
    def pser(self):
        return pd.Series([[1, 2, 3]])
    @property
    def psser(self):
        return ps.from_pandas(self.pser)
    def test_add(self):
        # Same-type arrays concatenate; pandas-on-Spark must agree with pandas.
        for pser, psser in zip(self.psers, self.pssers):
            self.assert_eq(pser + pser, psser + psser)
        with option_context("compute.ops_on_diff_frames", True):
            # Numeric array + Numeric array
            for pser1, psser1 in zip(self.numeric_array_psers, self.numeric_array_pssers):
                for pser2, psser2 in zip(self.numeric_array_psers, self.numeric_array_pssers):
                    self.assert_eq((pser1 + pser2).sort_values(), (psser1 + psser2).sort_values())
            # Non-numeric array + Non-numeric array: mixed element types fail.
            self.assertRaises(
                TypeError,
                lambda: self.non_numeric_array_pssers["string"]
                + self.non_numeric_array_pssers["bool"],
            )
            self.assertRaises(
                TypeError,
                lambda: self.non_numeric_array_pssers["string"]
                + self.non_numeric_array_pssers["date"],
            )
            self.assertRaises(
                TypeError,
                lambda: self.non_numeric_array_pssers["bool"]
                + self.non_numeric_array_pssers["date"],
            )
            # Same non-numeric element type on both sides still works.
            for data_type in self.non_numeric_array_psers.keys():
                self.assert_eq(
                    self.non_numeric_array_psers.get(data_type)
                    + self.non_numeric_array_psers.get(data_type),
                    self.non_numeric_array_pssers.get(data_type)
                    + self.non_numeric_array_pssers.get(data_type),
                )
            # Numeric array + Non-numeric array
            for numeric_ppser in self.numeric_array_pssers:
                for non_numeric_ppser in self.non_numeric_array_pssers.values():
                    self.assertRaises(TypeError, lambda: numeric_ppser + non_numeric_ppser)
    # --- every operator other than ``+`` is unsupported on array Series.
    def test_sub(self):
        self.assertRaises(TypeError, lambda: self.psser - "x")
        self.assertRaises(TypeError, lambda: self.psser - 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser1 in self.pssers:
                for psser2 in self.pssers:
                    self.assertRaises(TypeError, lambda: psser1 - psser2)
    def test_mul(self):
        self.assertRaises(TypeError, lambda: self.psser * "x")
        self.assertRaises(TypeError, lambda: self.psser * 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser1 in self.pssers:
                for psser2 in self.pssers:
                    self.assertRaises(TypeError, lambda: psser1 * psser2)
    def test_truediv(self):
        self.assertRaises(TypeError, lambda: self.psser / "x")
        self.assertRaises(TypeError, lambda: self.psser / 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser1 in self.pssers:
                for psser2 in self.pssers:
                    self.assertRaises(TypeError, lambda: psser1 / psser2)
    def test_floordiv(self):
        self.assertRaises(TypeError, lambda: self.psser // "x")
        self.assertRaises(TypeError, lambda: self.psser // 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser1 in self.pssers:
                for psser2 in self.pssers:
                    self.assertRaises(TypeError, lambda: psser1 // psser2)
    def test_mod(self):
        self.assertRaises(TypeError, lambda: self.psser % "x")
        self.assertRaises(TypeError, lambda: self.psser % 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser1 in self.pssers:
                for psser2 in self.pssers:
                    self.assertRaises(TypeError, lambda: psser1 % psser2)
    def test_pow(self):
        self.assertRaises(TypeError, lambda: self.psser ** "x")
        self.assertRaises(TypeError, lambda: self.psser ** 1)
        with option_context("compute.ops_on_diff_frames", True):
            for psser1 in self.pssers:
                for psser2 in self.pssers:
                    self.assertRaises(TypeError, lambda: psser1 ** psser2)
    def test_radd(self):
        self.assertRaises(TypeError, lambda: "x" + self.psser)
        self.assertRaises(TypeError, lambda: 1 + self.psser)
    def test_rsub(self):
        self.assertRaises(TypeError, lambda: "x" - self.psser)
        self.assertRaises(TypeError, lambda: 1 - self.psser)
    def test_rmul(self):
        self.assertRaises(TypeError, lambda: "x" * self.psser)
        self.assertRaises(TypeError, lambda: 2 * self.psser)
    def test_rtruediv(self):
        self.assertRaises(TypeError, lambda: "x" / self.psser)
        self.assertRaises(TypeError, lambda: 1 / self.psser)
    def test_rfloordiv(self):
        self.assertRaises(TypeError, lambda: "x" // self.psser)
        self.assertRaises(TypeError, lambda: 1 // self.psser)
    def test_rmod(self):
        # NOTE(review): unlike the other reflected ops, the str left-operand
        # case is not exercised here.
        self.assertRaises(TypeError, lambda: 1 % self.psser)
    def test_rpow(self):
        self.assertRaises(TypeError, lambda: "x" ** self.psser)
        self.assertRaises(TypeError, lambda: 1 ** self.psser)
    def test_and(self):
        self.assertRaises(TypeError, lambda: self.psser & True)
        self.assertRaises(TypeError, lambda: self.psser & False)
        self.assertRaises(TypeError, lambda: self.psser & self.psser)
    def test_rand(self):
        self.assertRaises(TypeError, lambda: True & self.psser)
        self.assertRaises(TypeError, lambda: False & self.psser)
    def test_or(self):
        self.assertRaises(TypeError, lambda: self.psser | True)
        self.assertRaises(TypeError, lambda: self.psser | False)
        self.assertRaises(TypeError, lambda: self.psser | self.psser)
    def test_ror(self):
        self.assertRaises(TypeError, lambda: True | self.psser)
        self.assertRaises(TypeError, lambda: False | self.psser)
    def test_from_to_pandas(self):
        # Round-trip pandas -> pandas-on-Spark -> pandas must be lossless.
        for pser, psser in zip(self.psers, self.pssers):
            self.assert_eq(pser, psser.to_pandas())
            self.assert_eq(ps.from_pandas(pser), psser)
    def test_isnull(self):
        for pser, psser in zip(self.psers, self.pssers):
            self.assert_eq(pser.isnull(), psser.isnull())
    def test_astype(self):
        self.assert_eq(self.pser.astype(str), self.psser.astype(str))
# Allow running this test module directly; emit JUnit-style XML reports when
# the optional ``xmlrunner`` package is installed.
if __name__ == "__main__":
    import unittest
    from pyspark.pandas.tests.data_type_ops.test_complex_ops import *  # noqa: F401
    try:
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to the default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
elahesadatnaghib/FB-Scheduler-v2 | Graphics.py | 1 | 14359 | import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import Circle
import ephem
import sqlite3 as lite
from progressbar import ProgressBar
# Altitude and Azimuth of a single field at t (JD) in rad
def Fields_local_coordinate(Field_ra, Field_dec, t, Site):
    """Return the (altitude, azimuth) in radians of a field located at
    (Field_ra, Field_dec) in degrees, as seen from the ephem observer
    ``Site`` at Julian date ``t``.

    Note: this mutates ``Site.date`` as a side effect.
    """
    Site.date = t
    body = ephem.FixedBody()
    body._ra = Field_ra * np.pi / 180
    body._dec = Field_dec * np.pi / 180
    body.compute(Site)
    return body.alt, body.az
def update_moon(t, Site):
    """Compute the Moon's plot position for time ``t``.

    Returns ``(X, Y, r, alt)``: projected plot coordinates, a display
    radius derived from the apparent size, and the altitude in radians.
    Mutates ``Site.date`` as a side effect.
    """
    Site.date = t
    moon = ephem.Moon()
    moon.compute(Site)
    X, Y = AltAz2XY(moon.alt, moon.az)
    # moon.size is the apparent diameter in arcseconds; doubled for visibility.
    radius = moon.size / 3600 * np.pi / 180 * 2
    return X, Y, radius, moon.alt
def AltAz2XY(Alt, Az):
    """Project horizontal coordinates (altitude, azimuth, radians) onto the
    2-D top-down sky map and return the ``(x, y)`` plot coordinates."""
    cos_alt = np.cos(Alt)
    north = -cos_alt * np.cos(Az)
    east = cos_alt * np.sin(Az)
    return east, -north
def visualize(Date, PlotID = 1,FPS = 15,Steps = 20,MP4_quality = 300, Name = "LSST Scheduler Simulator.mp4", showClouds = False):
    """Render one night of the LSST schedule as an MP4 animation.

    Parameters
    ----------
    Date : ephem.Date or float
        Start date of the night to visualize (UT, Dublin JD as used by ephem).
    PlotID : int
        1 = sky view only, 2 = sky view plus a field-coverage panel.
    FPS : int
        Frames per second of the output movie.
    Steps : int
        Number of frames sampled uniformly between the night's start and end.
    MP4_quality : int
        DPI handed to the ffmpeg writer (controls size/quality).
    Name : str
        Output file path of the movie.
    showClouds : bool
        Overlay the night's cloud-cover data when True.

    Reads the field list and cloud data from ``NightDataInLIS/`` and the
    schedule/history from the ``FBDE.db`` SQLite database.
    """
    # Import data
    All_Fields = np.loadtxt("NightDataInLIS/Constants/UnlabelledFields.lis", unpack = True)
    N_Fields = len(All_Fields[0])
    Site = ephem.Observer()
    Site.lon = -1.2320792
    Site.lat = -0.517781017
    Site.elevation = 2650
    Site.pressure = 0.
    Site.horizon = 0.
    if showClouds:
        Time_slots = np.loadtxt("NightDataInLIS/TimeSlots{}.lis".format(int(ephem.julian_date(Date))), unpack = True)
        All_Cloud_cover = np.loadtxt("NightDataInLIS/Clouds{}.lis".format(int(ephem.julian_date(Date))), unpack = True)
    # Initialize date and time
    lastN_start = float(Date) -1; lastN_end = float(Date)
    toN_start = float(Date); toN_end = float(Date) + 1
    # Connect to the History data base
    con = lite.connect('FBDE.db')
    cur = con.cursor()
    # Prepare to save in MP4 format
    FFMpegWriter = animation.writers['ffmpeg']
    metadata = dict(title='LSST Simulation', artist='Elahe', comment='Test')
    writer = FFMpegWriter(fps=FPS, metadata=metadata)
    # Progress bar initialization
    pbar = ProgressBar()
    # Initialize plot
    Fig = plt.figure()
    if PlotID == 1:
        ax = plt.subplot(111, axisbg = 'black')
    if PlotID == 2:
        ax = plt.subplot(211, axisbg = 'black')
    unobserved, Observed_lastN, Obseved_toN,\
    ToN_History_line,\
    uu,gg,rr,ii,zz,yy,\
    last_10_History_line,\
    Horizon, airmass_horizon, S_Pole,\
    LSST,\
    Clouds\
        = ax.plot([], [], '*',[], [], '*',[], [], '*',
                  [], [], '*',
                  [], [], '*',[], [], '*',[], [], '*',
                  [], [], '*',[], [], '*',[], [], '*',
                  [], [], '-',
                  [], [], '-',[], [], '-',[], [], 'D',
                  [], [], 'o',
                  [], [], 'o')
    ax.set_xlim(-1.5, 1.5)
    ax.set_ylim(-1.5, 1.5)
    ax.set_aspect('equal', adjustable = 'box')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Coloring
    Horizon.set_color('white'); airmass_horizon.set_color('red')
    S_Pole.set_markersize(3); S_Pole.set_markerfacecolor('red')
    star_size = 4
    unobserved.set_color('dimgray'); unobserved.set_markersize(star_size)
    Observed_lastN.set_color('blue'); Observed_lastN.set_markersize(star_size)
    Obseved_toN.set_color('chartreuse'); Obseved_toN.set_markersize(0)
    uu.set_color('purple'); gg.set_color('green'); rr.set_color('red')
    ii.set_color('orange'); zz.set_color('pink'); yy.set_color('deeppink')
    Clouds.set_color('white'); Clouds.set_markersize(10);
    Clouds.set_alpha(0.2); Clouds.set_markeredgecolor(None)
    ToN_History_line.set_color('orange'); ToN_History_line.set_lw(.5)
    last_10_History_line.set_color('gray'); last_10_History_line.set_lw(.5)
    LSST.set_color('red'); LSST.set_markersize(8)
    if PlotID == 2:
        freqAX = plt.subplot(212)
        cur.execute('SELECT N_visit, Last_visit, Second_last_visit, Third_last_visit, Fourth_last_visit From FieldsStatistics')
        row = cur.fetchall()
        N_visit = [x[0] for x in row]
        Last_visit = [x[1] for x in row]
        Second_last_visit = [x[2] for x in row]
        Third_last_visit = [x[3] for x in row]
        Fourth_last_visit = [x[4] for x in row]
        initHistoricalcoverage = N_visit
        # Discount visits that happened after tonight's start so the panel
        # shows the coverage accumulated *before* tonight.
        # BUGFIX: iterate over the field IDs (All_Fields[0]); enumerating
        # All_Fields itself walks the transposed file columns, not fields.
        for index, id in enumerate(All_Fields[0]):
            if Last_visit[index] > toN_start:
                initHistoricalcoverage[index] -= 1
            if Second_last_visit[index] > toN_start:
                initHistoricalcoverage[index] -= 1
            # BUGFIX: was ``Third_last_visit > toN_start`` (whole list vs
            # float) instead of the current field's timestamp.
            if Third_last_visit[index] > toN_start:
                initHistoricalcoverage[index] -= 1
        covering,current_cover = freqAX.plot(All_Fields[0],initHistoricalcoverage,'-',[],[],'o')
        freqAX.set_xlim(0,N_Fields)
        freqAX.set_ylim(0,np.max(initHistoricalcoverage)+5)
        covering.set_color('chartreuse'); covering.set_markersize(2)
        current_cover.set_color('red'); current_cover.set_markersize(6)
    cur.execute('SELECT Night_count, T_start, T_end FROM NightSummary WHERE T_start BETWEEN (?) AND (?)',(toN_start, toN_end))
    row = cur.fetchone()
    vID = row[0]
    t_start = row[1]
    t_end = row[2]
    t = t_start
    # Figure labels and fixed elements
    Phi = np.arange(0, 2* np.pi, 0.05)
    Horizon.set_data(1.01*np.cos(Phi), 1.01*np.sin(Phi))
    ax.text(-1.3, 0, 'West', color = 'white', fontsize = 7)
    ax.text(1.15, 0 ,'East', color = 'white', fontsize = 7)
    ax.text( 0, 1.1, 'North', color = 'white', fontsize = 7)
    airmass_horizon.set_data(np.cos(np.pi/4) * np.cos(Phi), np.cos(np.pi/4) * np.sin(Phi))
    ax.text(-.3, 0.6, 'Acceptable airmass horizon', color = 'white', fontsize = 5, fontweight = 'bold')
    Alt, Az = Fields_local_coordinate(180, -90, t, Site)
    x, y = AltAz2XY(Alt,Az)
    S_Pole.set_data(x, y)
    ax.text(x+ .05, y, 'S-Pole', color = 'white', fontsize = 7)
    # Observed last night fields
    cur.execute('SELECT Field_id FROM Schedule WHERE ephemDate BETWEEN (?) AND (?)',(lastN_start, lastN_end))
    row = cur.fetchall()
    if row is not None:
        F1 = [x[0] for x in row]
    else:
        F1 = []
    # Tonight observation path
    cur.execute('SELECT Field_id, ephemDate, filter FROM Schedule WHERE ephemDate BETWEEN (?) AND (?)',(toN_start, toN_end))
    row = cur.fetchall()
    # BUGFIX: guard against an empty result -- indexing row[0] on an empty
    # fetchall() list raised IndexError.
    if row and row[0][0] is not None:
        F2 = [x[0] for x in row]
        F2_timing = [x[1] for x in row]
        F2_filtering = [x[2] for x in row]
    else:
        F2 = []; F2_timing = []; F2_filtering = []
    # Sky elements
    Moon = Circle((0, 0), 0, color = 'silver', zorder = 3)
    ax.add_patch(Moon)
    Moon_text = ax.text([], [], 'Moon', color = 'white', fontsize = 7)
    with writer.saving(Fig, Name, MP4_quality) :
        for t in pbar(np.linspace(t_start, t_end, num = Steps)):
            # Find the index of the current time
            # BUGFIX: bound the scans -- the original loops ran past the end
            # of the list (IndexError) once t exceeded the last entry.
            time_index = 0
            while time_index < len(F2_timing) and t > F2_timing[time_index]:
                time_index += 1
            if showClouds:
                Slot_n = 0
                while Slot_n < len(Time_slots) and t > Time_slots[Slot_n]:
                    Slot_n += 1
            visit_index = 0
            visited_field = 0
            visit_index_u = 0; visit_index_g = 0; visit_index_r = 0; visit_index_i = 0; visit_index_z = 0; visit_index_y = 0
            visit_filter = 'r'
            # Object fields: F1)Observed last night F2)Observed tonight F3)Unobserved F4)Covered by clouds
            F1_X = []; F1_Y = []; F2_X = []; F2_Y = []; F3_X = []; F3_Y = []; F4_X = []; F4_Y = []
            # Filter coloring for tonight observation
            U_X = []; U_Y = []; G_X = []; G_Y = []; R_X = []; R_Y = []; I_X = []; I_Y = []; Z_X = []; Z_Y = []; Y_X = []; Y_Y = []
            # F1 coordinate:
            for i in F1:
                Alt, Az = Fields_local_coordinate(All_Fields[1,i-1], All_Fields[2,i-1], t, Site)
                if Alt > 0:
                    X, Y = AltAz2XY(Alt,Az)
                    F1_X.append(X); F1_Y.append(Y)
            # F2 coordinate:
            for i,tau,filter in zip(F2, F2_timing, F2_filtering):
                Alt, Az = Fields_local_coordinate(All_Fields[1,i-1], All_Fields[2,i-1], t, Site)
                if Alt > 0:
                    X, Y = AltAz2XY(Alt,Az)
                    F2_X.append(X); F2_Y.append(Y)
                    if filter == 'u':
                        U_X.append(X); U_Y.append(Y)
                        if t >= tau:
                            visit_index_u = len(U_X) -1
                    elif filter == 'g':
                        G_X.append(X); G_Y.append(Y)
                        if t >= tau:
                            visit_index_g = len(G_Y) -1
                    elif filter == 'r':
                        R_X.append(X); R_Y.append(Y)
                        if t >= tau:
                            visit_index_r = len(R_Y) -1
                    elif filter == 'i':
                        I_X.append(X); I_Y.append(Y)
                        if t >= tau:
                            visit_index_i = len(I_Y) -1
                    elif filter == 'z':
                        Z_X.append(X); Z_Y.append(Y)
                        if t >= tau:
                            visit_index_z = len(Z_Y) -1
                    elif filter == 'y':
                        Y_X.append(X); Y_Y.append(Y)
                        if t >= tau:
                            visit_index_y = len(Y_Y) -1
                    if t >= tau:
                        visit_index = len(F2_X) -1
                        visited_field = i
                        visit_filter = filter
            # F3 coordinate:
            for i in range(0,N_Fields):
                if True:
                    Alt, Az = Fields_local_coordinate(All_Fields[1,i], All_Fields[2,i], t, Site)
                    if Alt > 0:
                        X, Y = AltAz2XY(Alt,Az)
                        F3_X.append(X); F3_Y.append(Y)
            # F4 coordinates
            if showClouds:
                for i in range(0,N_Fields):
                    if All_Cloud_cover[Slot_n,i] == 2 or All_Cloud_cover[Slot_n,i] == 1 or All_Cloud_cover[Slot_n,i] == -1:
                        Alt, Az = Fields_local_coordinate(All_Fields[1,i], All_Fields[2,i], t, Site)
                        if Alt > 0:
                            X, Y = AltAz2XY(Alt,Az)
                            F4_X.append(X); F4_Y.append(Y)
            # Update plot
            unobserved.set_data([F3_X,F3_Y])
            Observed_lastN.set_data([F1_X,F1_Y])
            Obseved_toN.set_data([F2_X[0:visit_index],F2_Y[0:visit_index]])
            uu.set_data([U_X[0:visit_index_u],U_Y[0:visit_index_u]]); gg.set_data([G_X[0:visit_index_g],G_Y[0:visit_index_g]])
            rr.set_data([R_X[0:visit_index_r],R_Y[0:visit_index_r]]); ii.set_data([I_X[0:visit_index_i],I_Y[0:visit_index_i]])
            zz.set_data([Z_X[0:visit_index_z],Z_Y[0:visit_index_z]]); yy.set_data([Y_X[0:visit_index_y],Y_Y[0:visit_index_y]])
            ToN_History_line.set_data([F2_X[0:visit_index], F2_Y[0:visit_index]])
            last_10_History_line.set_data([F2_X[visit_index - 10: visit_index], F2_Y[visit_index - 10: visit_index]])
            # telescope position and color
            LSST.set_data([F2_X[visit_index],F2_Y[visit_index]])
            if visit_filter == 'u':
                LSST.set_color('purple')
            if visit_filter == 'g':
                LSST.set_color('green')
            if visit_filter == 'r':
                LSST.set_color('red')
            if visit_filter == 'i':
                LSST.set_color('orange')
            if visit_filter == 'z':
                LSST.set_color('pink')
            if visit_filter == 'y':
                LSST.set_color('deeppink')
            Clouds.set_data([F4_X,F4_Y])
            # Update Moon
            X, Y, r, alt = update_moon(t, Site)
            Moon.center = X, Y
            Moon.radius = r
            if alt > 0:
                #Moon.set_visible(True)
                Moon_text.set_visible(True)
                Moon_text.set_x(X+.002); Moon_text.set_y(Y+.002)
            else :
                Moon.set_visible(False)
                Moon_text.set_visible(False)
            # Update coverage
            if PlotID == 2:
                Historicalcoverage = np.zeros(N_Fields)
                for i,tau in zip(F2, F2_timing):
                    if tau <= t:
                        Historicalcoverage[i -1] += 1
                    else:
                        break
                tot = Historicalcoverage + initHistoricalcoverage
                current_cover.set_data(visited_field -1,tot[visited_field -1])
                covering.set_data(All_Fields[0], tot)
            # Observation statistics
            leg = plt.legend([Observed_lastN, Obseved_toN],
                             ['Visited last night', time_index])
            for l in leg.get_texts():
                l.set_fontsize(6)
            date = ephem.date(t)
            Fig.suptitle('Top view of the LSST site on {}, GMT'.format(date))
            '''
            # progress
            perc= int(100*(t - t_start)/(t_end - t_start))
            if perc <= 100:
                print('{} %'.format(perc))
            else:
                print('100 %')
            '''
            # Save current frame
            writer.grab_frame()
'''
Site = ephem.Observer()
Site.lon = -1.2320792
Site.lat = -0.517781017
Site.elevation = 2650
Site.pressure = 0.
Site.horizon = 0.
n_nights = 3 # number of the nights to be scheduled starting from 1st Jan. 2021
Date_start = ephem.Date('2015/6/28 12:00:00.00') # times are in UT
for i in range(n_nights):
Date = ephem.Date(Date_start + i) # times are in UT
# create animation
FPS = 10 # Frame per second
Steps = 100 # Simulation steps
MP4_quality = 300 # MP4 size and quality
PlotID = 1 # 1 for one Plot, 2 for including covering pattern
visualize(Date, PlotID ,FPS, Steps, MP4_quality, 'Visualizations/LSST1plot{}.mp4'.format(i + 1), showClouds= True)
''' | mit |
dhruv13J/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
# BUGFIX: use floor division -- under Python 3 ``n / 2`` is a float and
# floats are not valid slice indices (TypeError). Identical on py2 ints.
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))

###############################################################################
# Canonical (symmetric) PLS

# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)

# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
          np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
          % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
          % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()

###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noize
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)

###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of compements exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))

###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# NOTE(review): the two transforms below use ``plsca``, not the freshly
# fitted ``cca`` -- confirm whether that is intentional.
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/metrics/pairwise.py | 5 | 46491 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
    """ Set X and Y appropriately and checks inputs
    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.
    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats (or dtype if provided). Finally, the function
    checks that the size of the second dimension of the two arrays is equal, or
    the equivalent check for a precomputed distance matrix.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
    precomputed : bool
        True if X is to be treated as precomputed distances to the samples in
        Y.
    dtype : string, type, list of types or None (default=None)
        Data type required for X and Y. If None, the dtype will be an
        appropriate float type selected by _return_float_dtype.
        .. versionadded:: 0.18
    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    X, Y, dtype_float = _return_float_dtype(X, Y)
    # Only warn about implicit dtype conversion when the caller explicitly
    # requested a dtype; the auto-selected float dtype is expected behavior.
    warn_on_dtype = dtype is not None
    estimator = 'check_pairwise_arrays'
    if dtype is None:
        dtype = dtype_float
    if Y is X or Y is None:
        # Validate once and alias Y to X so no copy is made; downstream code
        # relies on the ``X is Y`` identity check as a fast path.
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
                            warn_on_dtype=warn_on_dtype, estimator=estimator)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype,
                        warn_on_dtype=warn_on_dtype, estimator=estimator)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype,
                        warn_on_dtype=warn_on_dtype, estimator=estimator)
    if precomputed:
        # X holds distances to the rows of Y: (n_queries, n_indexed).
        if X.shape[1] != Y.shape[0]:
            raise ValueError("Precomputed metric requires shape "
                             "(n_queries, n_indexed). Got (%d, %d) "
                             "for %d indexed." %
                             (X.shape[0], X.shape[1], Y.shape[0]))
    elif X.shape[1] != Y.shape[1]:
        # Feature counts must agree for a pairwise metric.
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))
    return X, Y
def check_paired_arrays(X, Y):
    """Validate X and Y for paired (element-wise) distance computations.

    Runs the standard pairwise validation (conversion to float arrays,
    sparse handling, feature-count check) and additionally requires the two
    arrays to have identical shapes, since paired metrics match row i of X
    with row i of Y.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy
        array. If Y was None, safe_Y will be a pointer to X.
    """
    X, Y = check_pairwise_arrays(X, Y)
    if X.shape == Y.shape:
        return X, Y
    raise ValueError("X and Y should be of same shape. They were "
                     "respectively %r and %r long." % (X.shape, Y.shape))
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
                        X_norm_squared=None):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.
    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::
        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if one argument varies but the other remains unchanged, then
    `dot(x, x)` and/or `dot(y, y)` can be pre-computed.
    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)
    squared : boolean, optional
        Return squared Euclidean distances.
    X_norm_squared : array-like, shape = [n_samples_1], optional
        Pre-computed dot-products of vectors in X (e.g.,
        ``(X**2).sum(axis=1)``)
    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])
    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    X, Y = check_pairwise_arrays(X, Y)
    # Bring X_norm_squared to a column vector (n_samples_1, 1); accept either
    # a row or a column layout from the caller.
    if X_norm_squared is not None:
        XX = check_array(X_norm_squared)
        if XX.shape == (1, X.shape[0]):
            XX = XX.T
        elif XX.shape != (X.shape[0], 1):
            raise ValueError(
                "Incompatible dimensions for X and X_norm_squared")
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        YY = XX.T
    elif Y_norm_squared is not None:
        # YY must end up as a row vector (1, n_samples_2) for broadcasting.
        YY = np.atleast_2d(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    # ||x-y||^2 = ||x||^2 - 2<x,y> + ||y||^2, evaluated as one matrix product
    # plus two broadcast additions, all in place on ``distances``.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Floating point cancellation can leave tiny negative values; clamp to 0
    # before the sqrt.
    np.maximum(distances, 0, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.
    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance). The minimal distances are
    also returned.
    This is mostly equivalent to calling:
        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
    but uses much less memory, and is faster for large arrays.
    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points. Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)
    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y. The default value is quite conservative, but
        can be changed for fine-tuning. The larger the number, the larger the
        memory usage.
    metric : string or callable, default 'euclidean'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.
        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
        Distance matrices are not supported.
        Valid values for metric are:
        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']
        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']
        See the documentation for scipy.spatial.distance for details on these
        metrics.
    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.
    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.
    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].
    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.
    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    # Resolve a fast scikit-learn implementation for string metrics; callables
    # and unknown strings fall through to pairwise_distances below.
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")
    X, Y = check_pairwise_arrays(X, Y)
    if metric_kwargs is None:
        metric_kwargs = {}
    # axis=0 means "for each column of the distance matrix", which is the same
    # as swapping the roles of X and Y.
    if axis == 0:
        X, Y = Y, X
    # Allocate output arrays
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    values.fill(np.infty)
    # Process the (implicit) distance matrix in batch_size x batch_size tiles
    # so only one tile is materialized at a time.
    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # Squared distances via ||x||^2 - 2<x,y> + ||y||^2; the
                    # sqrt is deferred until after all tiles (see below).
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)
            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]
            # ``indices[chunk_x]`` is a slice view, so the boolean-masked
            # assignment below writes through to the full output arrays.
            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]
    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # Undo the deferred sqrt from the squared-euclidean fast path.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """For each row of X, find the index of the closest row of Y.

    Equivalent to ``pairwise_distances(X, Y, metric).argmin(axis=axis)`` but
    computed in memory-bounded batches, so it is much cheaper for large
    arrays. Thin wrapper around :func:`pairwise_distances_argmin_min` that
    discards the distances. This function works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like
        Points, shape (n_samples1, n_features).
    Y : array-like
        Points, shape (n_samples2, n_features).
    batch_size : integer
        Rows of X and of Y processed per tile; larger values use more
        memory.
    metric : string or callable
        Metric for the distance computation. Any metric from scikit-learn
        (['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']) or
        scipy.spatial.distance (['braycurtis', 'canberra', 'chebyshev',
        'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
        'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
        'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
        'sqeuclidean', 'yule']). A callable is applied per row pair and must
        return a scalar distance; distance matrices are not supported.
    metric_kwargs : dict
        Keyword arguments to pass to specified metric function.
    axis : int, optional, default 1
        Axis along which the argmin is to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    indices, _ = pairwise_distances_argmin_min(
        X, Y, axis=axis, metric=metric, batch_size=batch_size,
        metric_kwargs=metric_kwargs if metric_kwargs is not None else {})
    return indices
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """Compute the L1 (cityblock) distances between the vectors in X and Y.

    With ``sum_over_features=False`` the componentwise absolute differences
    are returned instead of their sum (dense inputs only).

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).
    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).
    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.
    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0.,  2.],
           [ 4.,  4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)

    if issparse(X) or issparse(Y):
        # The Cython routine handles the sparse case; it only produces the
        # summed distance matrix.
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        result = np.zeros((X.shape[0], Y.shape[0]))
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], result)
        return result

    if sum_over_features:
        # Dense summed case: delegate to scipy's C implementation.
        return distance.cdist(X, Y, 'cityblock')

    # Componentwise case: broadcast to (n_X, n_Y, n_features) and flatten the
    # first two axes.
    diffs = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
    return diffs.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Turn similarity into distance in place: d = 1 - s.
    dist = cosine_similarity(X, Y)
    np.negative(dist, out=dist)
    dist += 1
    return dist
# Paired distances
def paired_euclidean_distances(X, Y):
    """
    Computes the paired euclidean distances between X and Y
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)
    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # The row-wise L2 norm of the difference is exactly the paired distance.
    return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
    """Compute the L1 distances between corresponding rows of X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        return np.abs(delta).sum(axis=-1)
    # Sparse path: take |.| on the stored values only, then reduce the
    # matrix-typed row sums to a flat ndarray.
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the paired cosine distances between rows of X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    ------
    The cosine distance is equivalent to the half the squared
    euclidean distance if each sample is normalized to unit norm
    """
    X, Y = check_paired_arrays(X, Y)
    # On unit-normalized rows, ||x - y||^2 / 2 equals 1 - cos(x, y).
    diff = normalize(X) - normalize(Y)
    return 0.5 * row_norms(diff, squared=True)
# Mapping from metric name to the paired-distance implementation used by
# paired_distances(); 'l2'/'euclidean' and 'l1'/'manhattan'/'cityblock'
# are aliases of the same functions.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.
    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The
        callable should take two arrays from X as input and return a value
        indicating the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        return PAIRED_DISTANCES[metric](X, Y)
    if callable(metric):
        # Validate here since user callables typically don't.
        X, Y = check_paired_arrays(X, Y)
        # float dtype mirrors the np.zeros accumulator this replaces.
        return np.array([metric(x, y) for x, y in zip(X, Y)], dtype=float)
    raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """Compute the linear kernel (plain dot products) between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        if None, defaults to 1.0 / n_samples_1
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # Scale and shift the Gram matrix in place before exponentiation.
    base = safe_sparse_dot(X, Y.T, dense_output=True)
    base *= gamma
    base += coef0
    return base ** degree
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_samples_1
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    gram *= gamma
    gram += coef0
    np.tanh(gram, out=gram)  # in place to avoid an extra allocation
    return gram
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_samples_X

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # Start from squared distances, then exponentiate in place.
    kernel = euclidean_distances(X, Y, squared=True)
    kernel *= -gamma
    np.exp(kernel, out=kernel)
    return kernel
def laplacian_kernel(X, Y=None, gamma=None):
    """Compute the laplacian kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||_1)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <laplacian_kernel>`.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_samples_X

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = 1.0 / X.shape[1] if gamma is None else gamma
    # L1 distances scaled by -gamma, then exponentiated in place.
    kernel = manhattan_distances(X, Y)
    kernel *= -gamma
    np.exp(kernel, out=kernel)
    return kernel
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.
    Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.
    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

        .. versionadded:: 0.17
           parameter ``dense_output`` for dense output.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    X, Y = check_pairwise_arrays(X, Y)
    # Normalize each row to unit L2 norm, reusing the result when Y is X.
    X_unit = normalize(X, copy=True)
    Y_unit = X_unit if X is Y else normalize(Y, copy=True)
    return safe_sparse_dot(X_unit, Y_unit.T, dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y
    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.
    The chi-squared kernel is given by::
        k(x, y) = -Sum [(x - y)^2 / (x + y)]
    It can be interpreted as a weighted difference per entry.
    Read more in the :ref:`User Guide <chi2_kernel>`.
    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.
    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    # The Cython routine below only supports dense arrays.
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # The kernel is only defined for non-negative inputs (histograms).
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")
    # Output buffer is filled in place by the compiled helper.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Compute the exponential chi-squared kernel between X and Y::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    The kernel is computed between each pair of rows in X and Y, which must
    be non-negative (it is most commonly applied to histograms).

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # additive_chi2_kernel already returns -Sum[(x-y)^2/(x+y)], so scaling by
    # gamma and exponentiating (in place) finishes the computation.
    kernel = additive_chi2_kernel(X, Y)
    kernel *= gamma
    np.exp(kernel, out=kernel)
    return kernel
# Helper functions - distance
# Mapping from metric name to the scikit-learn distance implementation used
# by pairwise_distances(); 'l2'/'euclidean' and 'l1'/'manhattan'/'cityblock'
# are aliases.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances,
    'precomputed': None,  # HACK: precomputed is always allowed, never called
}
def distance_metrics():
    """Return the mapping of valid string metrics for pairwise_distances.

    Exists mainly so the metric-name-to-function mapping can be documented
    in one place. The valid distance metrics, and the function they map to,
    are:

    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Compute func(X, Y) by splitting Y into n_jobs column slices evaluated
    in parallel and stacking the partial results horizontally."""
    if n_jobs < 0:
        # Negative n_jobs counts backwards from the number of CPUs.
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    if Y is None:
        Y = X

    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)

    # TODO: in some cases, backend='threading' may be appropriate
    delayed_func = delayed(func)
    column_blocks = Parallel(n_jobs=n_jobs, verbose=0)(
        delayed_func(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(column_blocks)
def _pairwise_callable(X, Y, metric, **kwds):
    """Handle the callable-metric case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y)

    n_x, n_y = X.shape[0], Y.shape[0]
    if X is Y:
        # Evaluate the metric only on the strict upper triangle, then
        # symmetrize and fill the diagonal explicitly (nonzero diagonals are
        # allowed for both metrics and kernels).
        out = np.zeros((n_x, n_y), dtype='float')
        for i, j in itertools.combinations(range(n_x), 2):
            out[i, j] = metric(X[i], Y[j], **kwds)
        # NB: ``out += out.T`` would produce incorrect results.
        out = out + out.T
        for i in range(n_x):
            out[i, i] = metric(X[i], X[i], **kwds)
    else:
        # Distinct inputs: every cell must be computed.
        out = np.empty((n_x, n_y), dtype='float')
        for i in range(n_x):
            for j in range(n_y):
                out[i, j] = metric(X[i], Y[j], **kwds)
    return out
# String metrics accepted by pairwise_distances: the scikit-learn-implemented
# names plus everything supported by scipy.spatial.distance.{pdist,cdist}.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.
    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.
    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.
    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.
    Valid values for metric are:
    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.
    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.
    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.
    Read more in the :ref:`User Guide <metrics>`.
    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features], optional
        An optional second feature array. Only allowed if metric != "precomputed".
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))
    if metric == "precomputed":
        # X already is the distance matrix; only validate its shape.
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        # Fast scikit-learn implementation (supports sparse input).
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        # Remaining string metrics are delegated to scipy, which is
        # dense-only.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        # NOTE: PAIRWISE_BOOLEAN_FUNCTIONS is defined after this function;
        # the lookup happens at call time, so the forward reference is fine.
        dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
        X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
        if n_jobs == 1 and X is Y:
            # pdist computes only the condensed upper triangle — roughly
            # half the work of cdist for the symmetric case.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)
    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distance metrics require boolean input arrays when dispatched to
# scipy.spatial.distance; pairwise_distances() uses this list to pick
# dtype=bool when validating X and Y.
PAIRWISE_BOOLEAN_FUNCTIONS = [
    'dice',
    'jaccard',
    'kulsinski',
    'matching',
    'rogerstanimoto',
    'russellrao',
    'sokalmichener',
    'sokalsneath',
    'yule',
]
# Helper mapping - kernels
# Maps each valid string value of ``metric`` to its kernel function.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'laplacian': laplacian_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """ Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
    ===============   ========================================
      metric            Function
    ===============   ========================================
      'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
      'chi2'            sklearn.pairwise.chi2_kernel
      'linear'          sklearn.pairwise.linear_kernel
      'poly'            sklearn.pairwise.polynomial_kernel
      'polynomial'      sklearn.pairwise.polynomial_kernel
      'rbf'             sklearn.pairwise.rbf_kernel
      'laplacian'       sklearn.pairwise.laplacian_kernel
      'sigmoid'         sklearn.pairwise.sigmoid_kernel
      'cosine'          sklearn.pairwise.cosine_similarity
    ===============   ========================================
    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Maps each kernel name to the keyword arguments it accepts; used by
# pairwise_kernels(..., filter_params=True) to drop unsupported kwargs.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": (),
    "cosine": (),
    # NOTE(review): "exp_chi2" has no matching entry in
    # PAIRWISE_KERNEL_FUNCTIONS above -- presumably a stale alias; confirm.
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "laplacian": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel matrix between X and, optionally, Y.

    Takes either a vector array or a precomputed kernel matrix. When
    ``metric == "precomputed"``, ``X`` is assumed to already be a kernel
    matrix and is returned unchanged (``Y`` is ignored); this provides a
    safe way to accept kernel matrices while staying compatible with
    algorithms that expect vector arrays. Otherwise the kernels between the
    rows of ``X`` (and ``Y``, when given) are computed.

    Valid string values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array, only when X holds features.
    metric : string, callable, or Gaussian-process kernel object
        A key of ``pairwise.PAIRWISE_KERNEL_FUNCTIONS``, "precomputed", a
        GP kernel, or a callable taking two rows of X and returning their
        kernel value.
    filter_params : boolean
        When True, drop keyword arguments that the named kernel does not
        accept (see ``KERNEL_PARAMS``).
    n_jobs : int
        Number of parallel slices of the pairwise matrix. -1 uses all CPUs,
        1 runs serially (useful for debugging); for n_jobs below -1,
        (n_cpus + 1 + n_jobs) CPUs are used.
    `**kwds` : optional keyword parameters
        Forwarded to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        K[i, j] is the kernel between row i of X and row j of X (or of Y,
        when Y is given).

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    # import GPKernel locally to prevent circular imports
    from ..gaussian_process.kernels import Kernel as GPKernel

    if metric == "precomputed":
        X, _ = check_pairwise_arrays(X, Y, precomputed=True)
        return X

    # Resolve the kernel callable from the metric specification.
    if isinstance(metric, GPKernel):
        kernel_func = metric.__call__
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            allowed = KERNEL_PARAMS[metric]
            kwds = {key: value for key, value in kwds.items()
                    if key in allowed}
        kernel_func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        kernel_func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)

    return _parallel_pairwise(X, Y, kernel_func, n_jobs, **kwds)
| bsd-3-clause |
hugobowne/scikit-learn | examples/gaussian_process/plot_gpc.py | 103 | 3927 | """
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerable larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad)
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
# Print the module docstring as a header when the example is run.
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
# NOTE(review): ``sklearn.metrics.classification`` is a private module path;
# newer scikit-learn exposes these names via ``sklearn.metrics`` -- confirm
# against the targeted sklearn version.
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data: 100 1-D points in [0, 5); label is 1 where x > 2.5.
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters.
# ``optimizer=None`` freezes the kernel hyperparameters at their initial
# values; the second classifier optimizes them by maximizing the LML.
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
                                   optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
      % gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
      % gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
# NOTE(review): accuracy and log-loss below are evaluated on the training
# slice X[:train_size], not on the held-out points -- confirm intent (the
# module docstring speaks of "log-loss on test data").
print("Accuracy: %.3f (initial) %.3f (optimized)"
      % (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
         accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
      % (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
         log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors: predicted class-1 probability over the feature range.
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
         label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
         label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape over a log-spaced grid of kernel magnitude (theta0)
# and length-scale (theta1).
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
# log_marginal_likelihood expects log-transformed hyperparameters.
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
        for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
# Mark both hyperparameter choices (fixed and optimized) as black dots.
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
         'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
         'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| bsd-3-clause |
MTgeophysics/mtpy | tests/modeling/ModEM/test_data.py | 1 | 6272 | import difflib
import glob
import os
from os.path import dirname as UP
import sys
from unittest import TestCase
import tarfile
import matplotlib.pyplot as plt
from mtpy.core.edi_collection import EdiCollection
from mtpy.modeling.modem import Data
# patch that changes the matplotlib behaviour
from tests import make_temp_dir
from tests.imaging import plt_wait, plt_close
import numpy as np
# from tests.modeling import show_patcher
plt.ion() # enable interactive
# plt.ioff() # disable interactive, which will also disable this patch
# plt.show = show_patcher(plt.show)
# end of patch
class TestData(TestCase):
    """Validate the functionality of ``Data`` objects.

    This suite exercises the Data API and compares the written output
    against a previously captured reference run; it does not otherwise
    verify the output files for correctness.
    """

    @classmethod
    def setUpClass(cls):
        # One shared scratch directory for the whole suite.
        cls._temp_dir = make_temp_dir(cls.__name__)

    def setUp(self):
        # Each test writes into its own sub-directory of the class temp dir.
        self._output_dir = make_temp_dir(self._testMethodName,
                                         base_dir=self._temp_dir)
        # Directory holding the reference output of a previously correct run.
        expected_root = os.path.join(self._temp_dir, 'expected_data_output')
        self._expected_output_dir = os.path.normpath(
            os.path.join(expected_root, self._testMethodName))
        # Unpack only the archived reference files that belong to this test.
        archive = os.path.join(os.path.dirname(__file__),
                               'test_data.expected.tar.gz')
        tf = tarfile.open(archive)
        for member in tf.getmembers():
            if member.isreg() and self._testMethodName in member.name:
                # Flatten the stored path so the file lands directly in
                # the expected-output directory.
                member.name = os.path.basename(member.name)
                tf.extract(member, self._expected_output_dir)
        # No reference data for this test -> signal "nothing to compare".
        if not os.path.isdir(self._expected_output_dir):
            self._expected_output_dir = None

    def tearDown(self):
        plt_wait(1)
        plt_close('all')
# Repository root: four levels above this file (tests/modeling/ModEM/...).
mtpydir = UP(UP(UP(UP(os.path.abspath(__file__)))))
# Candidate directories containing *.edi input files; tests for paths that
# do not exist are skipped at run time.
edi_paths = [
    os.path.join(mtpydir, "data/edifiles"),
    os.path.join(mtpydir, "examples/data/edi2"),
    os.path.join(mtpydir, "examples/data/edi_files"),
    os.path.join(mtpydir, "data/edifiles2"),
    #"../MT_Datasets/3D_MT_data_edited_fromDuanJM",
    #"../MT_Datasets/GA_UA_edited_10s-10000s",
]
# epsg to project to. Google epsg 'your projection'
# epsg_code = 28354  # alternative projection (GDA94 / MGA zone 54)
epsg_code = 3112
# Error-model combinations to generate one test per entry:
# (test_name, error_type_tipper, error_type_z, error_value_z)
error_types = [
    ('floor_egbert', 'floor', 'egbert', 5),
    ('abs_egbert_floor', 'abs', 'egbert_floor', 5),
    ('floor_mean_od', 'floor', 'mean_od', 5),
    ('abs_mean_od_floor', 'abs', 'mean_od_floor', 5),
    ('floor_eigen', 'floor', 'eigen', 5),
    ('abs_eigen_floor', 'abs', 'eigen_floor', 5),
    ('floor_median', 'floor', 'median', 5),
    ('abs_median_floor', 'abs', 'median_floor', 5),
    # tests with error_type_z/error_value_z specified for
    # each component
    ('egbert_2x2_etz', 'floor', np.array([['egbert', 'egbert'],
                                          ['eigen', 'median']]), 5),
    ('egbert_2x2_evz', 'abs', 'egbert', np.array([[5, 10], [10, 5]])),
    ('egbert_2x2__etz_2x2__evz', 'abs', np.array([['egbert', 'egbert'],
                                                  ['eigen', 'median']]),
     np.array([[5, 10], [10, 5]]))
]
def _test_gen(edi_path, error_type_tipper, error_type_z, error_value_z):
    """
    Build one test function bound to the given EDI directory and error options.

    :param edi_path: directory expected to contain ``*.edi`` input files
    :param error_type_tipper: tipper error mode forwarded to ``Data``
    :param error_type_z: impedance error type (scalar or 2x2 array)
    :param error_value_z: impedance error value (scalar or 2x2 array)
    :return: a bound test function suitable for attaching to ``TestData``
    """
    def test_func(self):
        if not os.path.isdir(edi_path):
            # input file does not exist, skip test after remove the output dir
            os.rmdir(self._output_dir)
            self.skipTest("edi path does not exist: {}".format(edi_path))
        edi_list = glob.glob(edi_path + '/*.edi')
        period_list = EdiCollection(edi_list).select_periods()
        # Build the ModEM data object with the error model under test.
        datob = Data(edi_list=edi_list,
                     inv_mode='1',
                     period_list=period_list,
                     epsg=epsg_code,
                     error_type_tipper=error_type_tipper,
                     error_type_z=error_type_z,
                     error_value_z=error_value_z,
                     error_floor=10)
        datob.write_data_file(save_path=self._output_dir)
        # check the output against the captured reference run, if available
        if self._expected_output_dir:
            output_data_file = os.path.normpath(os.path.join(self._output_dir, "ModEM_Data.dat"))
            self.assertTrue(os.path.isfile(output_data_file), "output data file does not exist")
            expected_data_file = os.path.normpath(os.path.join(self._expected_output_dir,
                                                               "ModEM_Data.dat"))
            self.assertTrue(
                os.path.isfile(expected_data_file),
                "expected output data file does not exist, nothing to compare"
            )
            print("comparing", output_data_file, "and", expected_data_file)
            # Line-by-line unified diff; any emitted line means a mismatch.
            with open(output_data_file, 'r') as output:
                with open(expected_data_file, 'r') as expected:
                    diff = difflib.unified_diff(
                        expected.readlines(),
                        output.readlines(),
                        fromfile='expected',
                        tofile='output'
                    )
                    count = 0
                    for line in diff:
                        sys.stdout.write(line)
                        count += 1
                    self.assertTrue(count == 0, "output different!")
        else:
            print("no expected output exist, nothing to compare")
    return test_func
# generate tests: one test method per (edi_path, error-type) combination,
# attached to TestData so the unittest runner discovers them by name.
for edi_path in edi_paths:
    for name, error_type_tipper, error_type_z, error_value_z in error_types:
        test_func = _test_gen(edi_path, error_type_tipper, error_type_z, error_value_z)
        test_func.__name__ = "test_{}_{}".format(os.path.basename(edi_path), name)
        setattr(TestData, test_func.__name__, test_func)
# remove the leftover loop variable so the bare (unbound) function is not
# collected as a module-level test
if 'test_func' in globals():
    del globals()['test_func']
| gpl-3.0 |
cgre-aachen/gempy | examples/tutorials/ch1_fundamentals/ch1_5_fault_relations.py | 1 | 3621 | """
1.5: Fault relations
====================
"""
# %%
# Importing gempy
import gempy as gp
# Aux imports
import numpy as np
import pandas as pd
import os
# Importing the function to find the interface
from gempy.utils.input_manipulation import find_interfaces_from_block_bottoms
import matplotlib.pyplot as plt
np.random.seed(1515)
# NOTE(review): pd.set_option('precision', ...) is deprecated in newer
# pandas (use 'display.precision') -- confirm the supported pandas version.
pd.set_option('precision', 2)
# %%
# We import a model from an existing folder.
#
# %%
# Resolve the data path whether the script runs from the repo root or from
# inside the tutorials directory.
cwd = os.getcwd()
if 'examples' not in cwd:
    data_path = os.getcwd() + '/examples/'
else:
    data_path = cwd + '/../../'
geo_model = gp.load_model(r'Tutorial_ch1-9a_Fault_relations',
                          path=data_path + 'data/gempy_models/Tutorial_ch1-9a_Fault_relations',
                          recompile=True)
# %%
geo_model.faults.faults_relations_df
# %%
geo_model.faults
# %%
geo_model.surfaces
# %%
gp.compute_model(geo_model, compute_mesh=False)
# %%
geo_model.solutions.lith_block
# %%
geo_model.solutions.block_matrix[0]
# %%
gp.plot_2d(geo_model, cell_number=[25], show_data=True)
# Graben example
# --------------
# %%
geo_model_graben = gp.load_model(r'Tutorial_ch1-9b_Fault_relations',
                                 path=data_path + 'data/gempy_models/Tutorial_ch1-9b_Fault_relations', recompile=True)
# NOTE(review): this renames the *first* model (geo_model), not the graben
# model just loaded -- confirm whether geo_model_graben was intended.
geo_model.meta.project_name = "Faults_relations"
# %%
geo_model_graben.surfaces
# %%
geo_model_graben.additional_data
# %%
# Displaying the input data:
#
# %%
gp.plot_2d(geo_model_graben, direction='y')
# %%
gp.plot_2d(geo_model_graben, direction='x')
# %%
geo_model_graben.stack
# %%
geo_model_graben.faults
# %%
geo_model_graben.faults.faults_relations_df
# %%
gp.compute_model(geo_model_graben)
# %%
gp.plot_2d(geo_model_graben, cell_number=[25], show_data=True)
# %%
# sphinx_gallery_thumbnail_number = 5
gp.plot_3d(geo_model_graben, image=True)
# %%
gp.plot_2d(geo_model_graben, cell_number=[25], show_scalar=True, series_n=0)
gp.plot_2d(geo_model_graben, cell_number=[25], show_scalar=True, series_n=1)
# %%
# Offset parameter (Experimental)
# -------------------------------
#
# %%
geo_model_graben._interpolator.theano_graph.offset.set_value(1)
gp.compute_model(geo_model_graben, compute_mesh=False)
# %%
gp.plot_2d(geo_model_graben, block=geo_model_graben.solutions.block_matrix[1, 0, :125000],
           show_data=True)
# %%
gp.plot_2d(geo_model_graben, series_n=2, show_scalar=True)
# %%
geo_model_graben.solutions.scalar_field_matrix[1]
# %%
# Finding the faults intersection:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Sometimes we need to find the voxels that contain each fault. To do
# so we can use gempy's functionality to find interfaces as follows. Let's
# use the first fault as an example:
#
# %%
gp.plot_2d(geo_model_graben,
           regular_grid=geo_model_graben.solutions.block_matrix[0, 0, :125000],
           show_data=True)
# %%
# Remember the fault block is stored on:
geo_model_graben.solutions.block_matrix[0, 0, :125000]
# %%
# Now we can find the intersection of value 1 by calling the following function.
# This will return True on those voxels on the intersection.
intersection = find_interfaces_from_block_bottoms(
    geo_model_graben.solutions.block_matrix[0, 0, :125000].reshape(50, 50, 50), 1, shift=1)
# %%
# We can manually plot them together to see exactly what we have done.
ax = gp.plot_2d(geo_model_graben,
                block=geo_model_graben.solutions.block_matrix[0, 0, :125000],
                show_data=True, show=False)
plt.imshow(intersection[:, 25, :].T, origin='lower', extent=(0, 1000, -1000, 0), alpha=.5)
plt.show()
gp.save_model(geo_model) | lgpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.