| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
SmokinCaterpillar/pypet | pypet/tests/unittests/shared_data_test.py | 1 | 37080 | __author__ = ('Robert Meyer', 'Mehmet Nevvaf Timur')
import sys
import unittest
import os
import platform
import numpy as np
import pandas as pd
import tables as pt
from pypet import SharedPandasFrame, ObjectTable, make_ordinary_result, Result, \
make_shared_result, compact_hdf5_file, SharedCArray, SharedEArray, \
SharedVLArray
from pypet.tests.testutils.ioutils import get_root_logger, parse_args, run_suite
from pypet.tests.testutils.ioutils import make_temp_dir, make_trajectory_name, unittest
from pypet.tests.testutils.data import TrajectoryComparator
from pypet import Trajectory, SharedResult, SharedTable, SharedArray, load_trajectory, StorageContextManager
class MyTable(pt.IsDescription):
id = pt.Int32Col()
name = pt.StringCol(15)
surname = pt.StringCol(15)
weight = pt.FloatCol()
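# MyTable describes the column layout of the HDF5 tables created below via
# create_shared_data(description=MyTable); the tests then fill rows through
# PyTables' `row` accessor and persist them with flush().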
class StorageDataTrajectoryTests(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5'
def test_conversions(self):
filename = make_temp_dir('hdf5manipulation.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
traj.v_standard_result = SharedResult
traj.f_store(only_init=True)
traj.f_add_result('shared_data')
thedata = np.zeros((1000, 1000))
myarray = SharedArray('array', traj.shared_data, trajectory=traj)
traj.shared_data['array'] = myarray
mytable = SharedTable('t1', traj.shared_data, trajectory=traj)
traj.shared_data['t1'] = mytable
dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
dadict2 = {'answer': [42]}
res = traj.f_add_result('shared.dfs')
res['df'] = SharedPandasFrame()
res['df'].create_shared_data(data=pd.DataFrame(dadict), trajectory=traj)
frame = SharedPandasFrame('df1', traj.f_get('shared.dfs'), trajectory=traj,
add_to_parent=True)
frame.create_shared_data(data=pd.DataFrame(dadict2))
res['df1'] = frame
traj.f_add_result('mylist', [1, 2, 3])
traj.f_add_result('my.mytuple', k=(1, 2, 3), wa=42)
traj.f_add_result('my.myarray', np.zeros((50, 50)))
traj.f_add_result('my.myframe', data=pd.DataFrame(dadict2))
traj.f_add_result('my.mytable', ObjectTable(data=dadict2))
myarray.create_shared_data(data=thedata)
mytable.create_shared_data(first_row={'hi': 'hi'.encode('utf-8'), 'huhu': np.ones(3)})
traj.f_store()
data = myarray.read()
myarray.get_data_node()
self.assertTrue(np.all(data == thedata))
with StorageContextManager(traj):
myarray[2, 2] = 10
data = myarray.read()
self.assertTrue(data[2, 2] == 10)
self.assertTrue(data[2, 2] == 10)
self.assertFalse(traj.v_storage_service.is_open)
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
make_ordinary_result(traj.shared_data, 'array', trajectory=traj)
array = traj.shared_data.array
self.assertTrue(isinstance(array, np.ndarray))
thedata[2, 2] = 10
self.assertTrue(np.all(array == thedata))
make_ordinary_result(traj.shared_data, 't1', trajectory=traj)
t1 = traj.shared_data.t1
self.assertTrue(isinstance(t1, ObjectTable))
self.assertTrue(np.all(t1['huhu'][0] == np.ones(3)))
dfs = traj.shared.dfs
make_ordinary_result(traj.shared.dfs, 'df', trajectory=traj)
theframe = dfs.f_get('df')
self.assertTrue(isinstance(dfs, Result))
self.assertTrue(isinstance(theframe, pd.DataFrame))
self.assertTrue(theframe['hi'][0] == 1)
listres = traj.f_get('mylist')
listres = make_shared_result(listres, 0, trajectory=traj)
with StorageContextManager(traj):
self.assertTrue(listres[0][2] == 3)
listres[0][0] = 4
self.assertTrue(listres[0][0] == 4)
listres = make_ordinary_result(listres, 0, trajectory=traj)
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
mylist = traj.mylist
self.assertTrue(isinstance(listres, Result))
self.assertTrue(mylist[0] == 4)
self.assertTrue(isinstance(mylist, list))
mytuple = traj.mytuple
with self.assertRaises(AttributeError):
mytuple = make_shared_result(mytuple, 'mylist', traj, new_class=SharedArray)
mytuple = make_shared_result(mytuple, 'k', traj, new_class=SharedArray)
self.assertTrue(mytuple.k[1] == 2)
mytuple = make_ordinary_result(mytuple, 'k', trajectory=traj)
self.assertTrue(isinstance(mytuple.k, tuple))
self.assertTrue(mytuple.k[2] == 3)
myframe = traj.myframe
myframe = make_shared_result(myframe, 'data', traj)
theframe = myframe.data.read()
self.assertTrue(theframe['answer'][0] == 42)
myframe = make_ordinary_result(myframe, 'data', trajectory=traj)
traj.f_load_item(myframe)
self.assertTrue(myframe.data['answer'][0] == 42)
mytable = traj.f_get('mytable')
mytable = make_shared_result(mytable, 0, traj)
self.assertTrue(isinstance(mytable[0], SharedTable))
rows = mytable.mytable.read()
self.assertTrue(rows[0][0] == 42)
mytable = make_ordinary_result(mytable, 0, trajectory=traj)
self.assertTrue(isinstance(mytable, Result))
self.assertTrue(mytable[0]['answer'][0] == 42)
def test_storing_and_manipulating(self):
filename = make_temp_dir('hdf5manipulation.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
thedata = np.zeros((1000, 1000))
res = traj.f_add_result(SharedResult, 'shared')
myarray = SharedArray('array', res, trajectory=traj, add_to_parent=True)
mytable = SharedTable('t1', res, trajectory=traj, add_to_parent=True)
mytable2 = SharedTable('t2', res, trajectory=traj, add_to_parent=True)
mytable3 = SharedTable('t3', res, trajectory=traj, add_to_parent=True)
traj.f_store(only_init=True)
myarray.create_shared_data(data=thedata)
mytable.create_shared_data(first_row={'hi': 'hi'.encode('utf-8'), 'huhu': np.ones(3)})
mytable2.create_shared_data(description={'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1)})
mytable3.create_shared_data(description={'ha': pt.StringCol(2, pos=0), 'haha': pt.FloatCol(pos=1)})
traj.f_store()
newrow = {'ha': 'hu', 'haha': 4.0}
with self.assertRaises(TypeError):
traj.shared.t2.row
with StorageContextManager(traj) as cm:
row = traj.shared.t2.row
for irun in range(11):
for key, val in newrow.items():
row[key] = val
row.append()
traj.shared.t3.flush()
data = myarray.read()
myarray.get_data_node()
self.assertTrue(np.all(data == thedata))
with StorageContextManager(traj):
myarray[2, 2] = 10
data = myarray.read()
self.assertTrue(data[2, 2] == 10)
self.assertTrue(data[2, 2] == 10)
self.assertFalse(traj.v_storage_service.is_open)
traj = load_trajectory(name=trajname, filename=filename)
traj.f_load(load_data=2)
traj.shared.t2.traj = traj
traj.shared.t1.traj = traj
traj.shared.array.traj = traj
self.assertTrue(traj.shared.t2.nrows == 11, '%s != 11' % str(traj.shared.t2.nrows))
self.assertTrue(traj.shared.t2[0]['ha'] == 'hu'.encode('utf-8'), traj.shared.t2[0]['ha'])
self.assertTrue(traj.shared.t2[1]['ha'] == 'hu'.encode('utf-8'), traj.shared.t2[1]['ha'])
self.assertTrue('huhu' in traj.shared.t1.colnames)
self.assertTrue(traj.shared.array[2, 2] == 10)
@unittest.skipIf(platform.system() == 'Windows', 'Not supported under Windows')
def test_compacting(self):
filename = make_temp_dir('hdf5compacting.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
traj.v_storage_service.complevel = 7
first_row = {'ha': 'hi'.encode('utf-8'), 'haha': np.zeros((3, 3))}
traj.f_store(only_init=True)
traj.f_add_result('My.Tree.Will.Be.Deleted', 42)
traj.f_add_result('Mine.Too.HomeBoy', 42, comment="Don't cry for me!")
res = traj.f_add_result(SharedResult, 'myres')
res['myres'] = SharedTable()
res['myres'].create_shared_data(first_row=first_row)
with StorageContextManager(traj):
traj.myres
for irun in range(10000):
row = traj.myres.row
for key in first_row:
row[key] = first_row[key]
row.append()
traj.f_store()
del traj
traj = load_trajectory(name=trajname, filename=filename, load_all=2)
with StorageContextManager(traj) as cm:
tb = traj.myres.get_data_node()
tb.remove_rows(1000, 10000)
cm.flush_store()
self.assertTrue(traj.myres.nrows == 1001)
traj.f_delete_item(traj.My, recursive=True)
traj.f_delete_item(traj.Mine, recursive=True)
size = os.path.getsize(filename)
get_root_logger().info('Filesize is %s' % str(size))
name_wo_ext, ext = os.path.splitext(filename)
backup_file_name = name_wo_ext + '_backup' + ext
code = compact_hdf5_file(filename, keep_backup=True)
if code != 0:
raise RuntimeError('ptrepack failed with exit code %s' % str(code))
backup_size = os.path.getsize(backup_file_name)
self.assertTrue(backup_size == size)
new_size = os.path.getsize(filename)
get_root_logger().info('New filesize is %s' % str(new_size))
self.assertTrue(new_size < size, "%s > %s" % (str(new_size), str(size)))
def test_all_arrays(self):
filename = make_temp_dir('hdf5arrays.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
trajname = traj.v_name
npearray = np.ones((2, 10, 3), dtype=np.float64)
thevlarray = np.array(['j'.encode('utf-8'), 22.2, 'gutter'.encode('utf-8')])
traj.f_store(only_init=True)
res = traj.f_add_result(SharedResult, 'arrays')
res['carray'] = SharedCArray()
res['carray'].create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
res['earray'] = SharedEArray()
res['earray'].create_shared_data(obj=npearray)
res['vlarray'] = SharedVLArray()
res['vlarray'].create_shared_data(obj=thevlarray)
res['array'] = SharedArray()
res['array'].create_shared_data(data=npearray)
traj.f_store()
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
toappend = [44, 'k'.encode('utf-8')]
with StorageContextManager(traj):
a1 = traj.arrays.array
a1[0, 0, 0] = 4.0
a2 = traj.arrays.carray
a2[0, 1] = 4
a4 = traj.arrays.vlarray
a4.append(toappend)
a3 = traj.arrays.earray
a3.append(np.zeros((1, 10, 3)))
traj = load_trajectory(name=trajname, filename=filename, load_all=2,
dynamic_imports=SharedResult)
with StorageContextManager(traj):
a1 = traj.arrays.array
self.assertTrue(a1[0, 0, 0] == 4.0)
a2 = traj.arrays.carray
self.assertTrue(a2[0, 1] == 4)
a3 = traj.arrays.earray
self.assertTrue(a3.read().shape == (3, 10, 3))
a4 = traj.arrays.vlarray
for idx, x in enumerate(a4):
if idx == 0:
self.assertTrue(np.all(x == np.array(thevlarray)))
elif idx == 1:
self.assertTrue(np.all(x == np.array(toappend)))
else:
raise RuntimeError()
def test_df(self):
filename = make_temp_dir('hdf5errors.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
traj.f_store()
dadict = {'hi': [1, 2, 3, 4, 5], 'shu': ['bi', 'du', 'da', 'ha', 'hui']}
dadict2 = {'answer': [42]}
traj.f_add_result(SharedResult, 'dfs.df', SharedPandasFrame()).create_shared_data(data=pd.DataFrame(dadict))
traj.f_add_result(SharedResult, 'dfs.df1', SharedPandasFrame()).create_shared_data(data=pd.DataFrame(dadict2))
traj.f_add_result(SharedResult, 'dfs.df3', SharedPandasFrame())
for irun in range(10):
traj.df3.append(traj.df1.read())
dframe = traj.df3.read()
self.assertTrue(len(dframe) == 10)
what = traj.df.select(where='index == 2')
self.assertTrue(len(what) == 1)
def test_errors(self):
filename = make_temp_dir('hdf5errors.hdf5')
traj = Trajectory(name=make_trajectory_name(self), filename=filename)
npearray = np.ones((2, 10, 3), dtype=np.float64)
thevlarray = np.array(['j'.encode('utf-8'), 22.2, 'gutter'.encode('utf-8')])
with self.assertRaises(TypeError):
traj.f_add_result(SharedResult, 'arrays.vlarray', SharedVLArray()).create_shared_data(obj=thevlarray)
traj.f_store()
traj.arrays.vlarray.create_shared_data(obj=thevlarray)
traj.f_add_result(SharedResult, 'arrays.array', SharedArray()).create_shared_data(data=npearray)
traj.arrays.f_add_result(SharedResult, 'super.carray', SharedCArray(),
comment='carray').create_shared_data(shape=(10, 10), atom=pt.atom.FloatAtom())
traj.arrays.f_add_result(SharedResult, 'earray', SharedEArray()).create_shared_data('earray',
obj=npearray)
traj.f_store()
with self.assertRaises(TypeError):
traj.arrays.array.iterrows()
with StorageContextManager(traj):
with self.assertRaises(RuntimeError):
with StorageContextManager(traj):
pass
self.assertTrue(traj.v_storage_service.is_open)
with self.assertRaises(RuntimeError):
StorageContextManager(traj).open_store()
self.assertFalse(traj.v_storage_service.is_open)
class SharedTableTest(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'table', 'mehmet'
def setUp(self):
self.filename = make_temp_dir('shared_table_test.hdf5')
self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
self.traj.v_standard_result = SharedResult
self.traj.f_store(only_init=True)
self.traj.f_add_result('shared_data')
self.shared_table = SharedTable(name='table',
parent=self.traj.shared_data,
trajectory=self.traj,
add_to_parent=True)
def test_table_read(self):
the_reading_table = self.traj.results.shared_data.table
self.assertTrue(the_reading_table is self.shared_table)
the_reading_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_reading_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_reading_table.flush()
for idx, row in enumerate(the_reading_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_reading_table = traj2.results.shared_data.table
self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))
second_reading_table.append([(21, 'aaa', 'bbb', 100)])
self.assertTrue(np.all(the_reading_table.read() == second_reading_table.read()))
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_reading_table = traj3.results.shared_data.table
self.assertTrue(np.all(the_reading_table.read() == third_reading_table.read()))
def test_table_append(self):
the_append_table = self.traj.results.shared_data.table
self.assertTrue(the_append_table is self.shared_table)
the_append_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_append_table.row
for i in range(15):
row['id'] = i * 2
row['name'] = 'name %d' % i
row['surname'] = '%d surname' % i
row['weight'] = (i*0.5 + 50.0)
row.append()
the_append_table.flush()
for idx, row in enumerate(the_append_table.iterrows()):
self.assertEqual(row['id'], idx * 2)
self.assertEqual(row['name'], ('name %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], ('%d surname' % idx).encode('utf-8'))
self.assertEqual(row['weight'], idx*0.5+50.0)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_append_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_append_table.iterrows()):
self.assertEqual(row['id'], idx * 2)
self.assertEqual(row['name'], ('name %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], ('%d surname' % idx).encode('utf-8'))
self.assertEqual(row['weight'], idx*0.5+50.0)
second_append_table.append([(30, 'mehmet', 'timur', 65.5)])
self.assertEqual(second_append_table.read(field='id')[-1], 30)
self.assertEqual(second_append_table.read(field='name')[-1], 'mehmet'.encode('utf-8'))
self.assertEqual(second_append_table.read(field='surname')[-1], 'timur'.encode('utf-8'))
self.assertEqual(second_append_table.read(field='weight')[-1], 65.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_append_table = traj3.results.shared_data.table
self.assertEqual((third_append_table.read(field='id')[-1]), 30)
self.assertEqual((third_append_table.read(field='name')[-1]), 'mehmet'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='surname')[-1]), 'timur'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='weight')[-1]), 65.5)
third_append_table.append([(33, 'Harrison', 'Ford', 95.5)])
self.assertEqual((third_append_table.read(field='id')[-1]), 33)
self.assertEqual((third_append_table.read(field='name')[-1]), 'Harrison'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='surname')[-1]), 'Ford'.encode('utf-8'))
self.assertEqual((third_append_table.read(field='weight')[-1]), 95.5)
def test_table_iterrows(self):
the_iterrows_table = self.traj.results.shared_data.table
self.assertTrue(the_iterrows_table is self.shared_table)
the_iterrows_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_iterrows_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_iterrows_table.flush()
for idx, row in enumerate(the_iterrows_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_table.iterrows()):
self.assertEqual(row['id'], idx)
def test_table_col(self):
the_col_table = self.traj.results.shared_data.table
self.assertTrue(the_col_table is self.shared_table)
the_col_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_col_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_col_table.flush()
for idx, row in enumerate(the_col_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_col_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_col_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertTrue(np.all(second_col_table.read(field='id') == second_col_table.col('id')))
self.assertTrue(np.all(second_col_table.read(field='name') == second_col_table.col('name')))
self.assertTrue(np.all(second_col_table.read(field='surname') == second_col_table.col('surname')))
self.assertTrue(np.all(second_col_table.read(field='weight') == second_col_table.col('weight')))
# def test_table_itersequence(self):
# pass
#
# def test_table_itersorted(self):
# pass
#
# def test_table_read_coordinates(self):
# pass
#
# def test_table_read_sorted(self):
# pass
def test_table_getitem(self):
the_getitem_table = self.traj.results.shared_data.table
self.assertTrue(the_getitem_table is self.shared_table)
the_getitem_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_getitem_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_getitem_table.flush()
for idx, row in enumerate(the_getitem_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_getitem_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_getitem_table.iterrows()):
self.assertTrue(np.all(second_getitem_table.read()[idx] == second_getitem_table[idx]))
second_getitem_table.append([(30, 'mehmet nevvaf', 'timur', 65.5)])
for idx, row in enumerate(second_getitem_table.iterrows(-1)):
self.assertEqual(row['id'], 30)
self.assertEqual(row['name'], 'mehmet nevvaf'.encode('utf-8'))
self.assertEqual(row['surname'], 'timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_getitem_table = traj3.results.shared_data.table
with StorageContextManager(traj3):
for idx, row in enumerate(third_getitem_table.iterrows()):
self.assertTrue(np.all(third_getitem_table.read()[idx] == third_getitem_table[idx]))
# def test_table_iter(self):
# pass
#
# def test_table_modify_column(self):
# pass
#
# def test_table_modify_columns(self):
# pass
#
# def test_table_modify_coordinates(self):
# pass
#
# def test_table_modify_rows(self):
# pass
#
# def test_table_remove_rows(self):
# pass
#
# def test_table_remove_row(self):
# pass
def test_table_setitem(self):
the_setitem_table = self.traj.results.shared_data.table
self.assertTrue(the_setitem_table is self.shared_table)
the_setitem_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_setitem_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i * 1.5
row.append()
the_setitem_table.flush()
for idx, row in enumerate(the_setitem_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_setitem_table = traj2.results.shared_data.table
second_setitem_table[0] = [(100, 'Mehmet Nevvaf', 'TIMUR', 75.5)]
self.assertEqual(second_setitem_table.read(field='id')[0], 100)
self.assertEqual(second_setitem_table.read(field='name')[0], 'Mehmet Nevvaf'.encode('utf-8'))
self.assertEqual(second_setitem_table.read(field='surname')[0], 'TIMUR'.encode('utf-8'))
self.assertEqual(second_setitem_table.read(field='weight')[0], 75.5)
traj2.f_store()
traj3 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
third_setitem_table = traj3.results.shared_data.table
self.assertEqual(third_setitem_table.read(field='id')[0], 100)
self.assertEqual(third_setitem_table.read(field='name')[0], 'Mehmet Nevvaf'.encode('utf-8'))
self.assertEqual(third_setitem_table.read(field='surname')[0], 'TIMUR'.encode('utf-8'))
self.assertEqual(third_setitem_table.read(field='weight')[0], 75.5)
# def test_table_get_where_list(self):
# pass
#
# def test_table_read_where(self):
# pass
def test_table_where(self):
the_where_table = self.traj.results.shared_data.table
self.assertTrue(the_where_table is self.shared_table)
the_where_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_where_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
the_where_table.flush()
for idx, row in enumerate(the_where_table.iterrows()):
self.assertEqual(row['id'], idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_where_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
result = second_where_table.where('(id == 2)&(name == b"mehmet 2")&(surname ==b"Timur")&(weight == 67.5)')
there = False
for row in result:
there = True
self.assertTrue(there)
# def test_table_append_where(self):
# pass
#
# def test_table_will_query_use_indexing(self):
# pass
#
# def test_table_copy(self):
# pass
#
# def test_table_flush_rows_to_index(self):
# pass
#
# def test_table_get_enum(self):
# pass
#
# def test_table_reindex(self):
# pass
#
# def test_table_reindex_dirty(self):
# pass
#
# def test_table_remove_index(self):
# pass
#
# def test_table_create_index(self):
# pass
#
# def test_table_create_cindex(self):
# pass
#
# def test_table_colindexes(self):
# pass
#
# def test_table_cols(self):
# pass
#
# def test_table_row(self):
# pass
def test_table_flush(self):
the_flush_table = self.traj.results.shared_data.table
self.assertTrue(the_flush_table is self.shared_table)
the_flush_table.create_shared_data(description=MyTable)
with StorageContextManager(self.traj):
row = the_flush_table.row
for i in range(10):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
the_flush_table.flush()
for idx, row in enumerate(the_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_flush_table = traj2.results.shared_data.table
with StorageContextManager(traj2):
for idx, row in enumerate(second_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
row = second_flush_table.row
for i in range(10, 11):
row['id'] = i
row['name'] = 'mehmet %d' % i
row['surname'] = 'Timur'
row['weight'] = 65.5 + i
row.append()
second_flush_table.flush()
for idx, row in enumerate(second_flush_table.iterrows()):
self.assertEqual(row['id'], idx)
self.assertEqual(row['name'], ('mehmet %d' % idx).encode('utf-8'))
self.assertEqual(row['surname'], 'Timur'.encode('utf-8'))
self.assertEqual(row['weight'], 65.5+idx)
class SharedArrayTest(TrajectoryComparator):
tags = 'unittest', 'trajectory', 'shared', 'hdf5', 'array', 'mehmet'
def setUp(self):
self.filename = make_temp_dir('shared_table_test.hdf5')
self.traj = Trajectory(name=make_trajectory_name(self), filename=self.filename)
self.traj.v_standard_result = SharedResult
self.traj.f_store(only_init=True)
self.traj.f_add_result('shared_data')
self.shared_array = SharedArray(name='array',
parent=self.traj.shared_data,
trajectory=self.traj,
add_to_parent=True)
def test_array_read(self):
the_reading_array = np.ones((100, 100)) * 4
first_reading_array = self.traj.results.shared_data.array
self.assertTrue(first_reading_array is self.shared_array)
first_reading_array.create_shared_data(obj=the_reading_array)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_reading_array = traj2.shared_data.array.read()
self.assertTrue(np.all(the_reading_array == second_reading_array),
'%s != %s' % (str(the_reading_array), str(second_reading_array)))
def test_array_getitem(self):
the_getitem_array = np.array(range(100))
first_getitem_array = self.traj.results.shared_data.array
first_getitem_array.create_shared_data(obj=the_getitem_array)
for k in range(len(the_getitem_array)):
self.assertEqual(the_getitem_array[k], first_getitem_array[k])
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
for j in range(len(the_getitem_array)):
self.assertEqual(the_getitem_array[j], traj2.results.shared_data.array[j])
def test_array_getenum(self):
the_getenum_array = np.array(range(100))
first_getenum_array = self.traj.results.shared_data.array
first_getenum_array.create_shared_data(obj=the_getenum_array)
with self.assertRaises(TypeError):
first_getenum_array.get_enum()
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_enum_array = traj2.results.shared_data.array
with self.assertRaises(TypeError):
second_enum_array.get_enum()
def test_array_iterrows(self):
the_iterrows_array = np.random.randint(0, 100, (100, 100))
first_iterrows_array = self.traj.results.shared_data.array
first_iterrows_array.create_shared_data(obj=the_iterrows_array)
with StorageContextManager(self.traj):
for idx, row in enumerate(first_iterrows_array.iterrows()):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_array = traj2.results.shared_data.array
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_array.iterrows()):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
def test_array_setitem(self):
the_setitem_array = np.zeros((50, 50))
first_setitem_array = self.traj.results.shared_data.array
first_setitem_array.create_shared_data(obj=the_setitem_array)
first_setitem_array[2, 2] = 10
self.assertEqual(first_setitem_array[2, 2], 10)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_setitem_array = traj2.results.shared_data.array
self.assertEqual(second_setitem_array[2, 2], 10)
second_setitem_array[3, 3] = 17
self.assertEqual(second_setitem_array[3, 3], 17)
def test_array_iter(self):
the_iterrows_array = np.random.randint(0, 100, (100, 100))
first_iterrows_array = self.traj.results.shared_data.array
first_iterrows_array.create_shared_data(obj=the_iterrows_array)
with StorageContextManager(self.traj):
for idx, row in enumerate(first_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.assertTrue(np.all(the_iterrows_array == first_iterrows_array.read()))
for idx, row in enumerate(the_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_iterrows_array = traj2.results.shared_data.array
with StorageContextManager(traj2):
for idx, row in enumerate(second_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
self.assertTrue(np.all(the_iterrows_array == second_iterrows_array.read()))
for idx, row in enumerate(second_iterrows_array):
self.assertTrue(np.all(row == the_iterrows_array[idx, :]))
def test_array_len(self):
the_len_array = np.ones((100, 100))
first_len_array = self.traj.results.shared_data.array
self.assertTrue(first_len_array is self.shared_array)
first_len_array.create_shared_data(obj=the_len_array)
self.assertEqual(len(first_len_array), 100)
self.traj.f_store()
traj2 = load_trajectory(name=self.traj.v_name, filename=self.filename, load_all=2, dynamic_imports=SharedResult)
second_len_array = traj2.results.shared_data.array
self.assertEqual(len(second_len_array), 100)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args) | bsd-3-clause |
I--P/numpy | numpy/linalg/linalg.py | 4 | 75738 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains a high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
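# Usage pattern (mirrors solve() and inv() below): build an extobj with the
# appropriate callback and pass it to the gufunc, so that a failed LAPACK
# call raises LinAlgError instead of returning invalid values, e.g.:
#   extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
#   r = _umath_linalg.solve(a, b, signature='dd->d', extobj=extobj)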
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in the lite version, computation is always done in double or cdouble;
# result_type tracks whether the result may be cast back to single precision
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
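# Illustrative note for _commonType (inferred from the mappings above, not
# part of the original source): mixing a float32 and a complex64 array yields
# (cdouble, csingle), i.e. the computation runs in double precision and the
# result is cast back to single-precision complex.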
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
# reorder `a` so the axes to be summed over sit rightmost
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
# the trailing an - b.ndim dimensions form the 'square' block to invert
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using the LAPACK routine _gesv.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
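Broadcasting (see Notes) solves a whole stack of systems in one call; as a
minimal sketch:
>>> a = np.array([np.eye(2) * (i + 1.) for i in range(3)])
>>> b = np.ones((3, 2))
>>> np.linalg.solve(a, b)[2]
array([ 0.33333333,  0.33333333])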
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i.e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i.e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
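As a minimal sketch of the two-step solve described in the Notes (plain
``solve`` is used here for brevity; a dedicated triangular solver such as
``scipy.linalg.solve_triangular`` would exploit the structure of `L`):
>>> b = np.array([1+0j, 3+1j])
>>> L = np.linalg.cholesky(A)
>>> y = np.linalg.solve(L, b) # solve L y = b
>>> x = np.linalg.solve(L.conj().T, y) # solve L.H x = y
>>> np.allclose(np.dot(A, x), b)
True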
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using the LAPACK routines _syevd and _heevd.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
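# Hedged sketch (added example): verifies the defining relation from the
# Notes, ``dot(a, v[:, i]) == w[i] * v[:, i]``, for each eigenpair, and
# illustrates that left eigenvectors can be obtained from ``eig(a.T)``.
# The input matrix is a hypothetical example.
def _demo_eig_sketch():
    import numpy as np
    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    w, v = np.linalg.eig(a)
    for i in range(2):
        assert np.allclose(a.dot(v[:, i]), w[i] * v[:, i])
    # y with y.T a = z y.T solves a.T y = z y, so eig(a.T) gives left
    # eigenvectors; the eigenvalues themselves coincide.
    wl, vl = np.linalg.eig(a.T)
    assert np.allclose(np.sort(w), np.sort(wl))
    return w, v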
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
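# Hedged sketch (added example): for a genuinely symmetric input, the UPLO
# argument only selects which triangle of `a` is read, so 'L' and 'U'
# should give the same eigenvalues. The matrix is a hypothetical example.
def _demo_eigh_uplo_sketch():
    import numpy as np
    a = np.array([[2.0, 1.0], [1.0, 3.0]])
    w_lo, _ = np.linalg.eigh(a, UPLO='L')
    w_up, _ = np.linalg.eigh(a, UPLO='U')
    assert np.allclose(w_lo, w_up)
    return w_lo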
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd.
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
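# Hedged sketch (added example): checks the relation stated in the Notes --
# the squared singular values are the eigenvalues of ``a.conj().T @ a``.
# The rectangular input is a hypothetical example.
def _demo_svd_sketch():
    import numpy as np
    a = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    u, s, vt = np.linalg.svd(a, full_matrices=False)
    evals = np.linalg.eigvalsh(a.conj().T.dot(a))  # ascending order
    assert np.allclose(np.sort(s ** 2), evals)
    return s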
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
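# Hedged sketch (added example): checks the definition from the Notes,
# cond(x, p) == norm(x, p) * norm(inv(x), p), for a hypothetical matrix
# and the 1-norm.
def _demo_cond_sketch():
    import numpy as np
    x = np.array([[1.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
    lhs = np.linalg.cond(x, 1)
    rhs = np.linalg.norm(x, 1) * np.linalg.norm(np.linalg.inv(x), 1)
    assert np.allclose(lhs, rhs)
    return lhs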
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
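# Hedged sketch (added example): shows how the `tol` threshold changes the
# reported rank of a nearly rank-deficient matrix. The default tolerance
# (~ S.max() * max(M.shape) * eps) is far below 1e-12, so the tiny singular
# value still counts; an explicit coarser tol drops it. Values are
# hypothetical.
def _demo_matrix_rank_tol_sketch():
    import numpy as np
    m = np.diag([1.0, 1.0, 1e-12])                   # one tiny singular value
    assert np.linalg.matrix_rank(m) == 3             # default tol keeps it
    assert np.linalg.matrix_rank(m, tol=1e-6) == 2   # explicit tol drops it
    return np.linalg.matrix_rank(m)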
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
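# Hedged sketch (added example): checks the two identities from the Examples
# section, ``a @ pinv(a) @ a == a`` and ``pinv(a) @ a @ pinv(a) == pinv(a)``,
# on a hypothetical rectangular matrix.
def _demo_pinv_sketch():
    import numpy as np
    a = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    b = np.linalg.pinv(a)
    assert np.allclose(a, a.dot(b).dot(a))
    assert np.allclose(b, b.dot(a).dot(b))
    return b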
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
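# Hedged sketch (added example): verifies ``det == sign * np.exp(logdet)``
# on a stack of hypothetical 2x2 matrices, exercising the broadcasting
# noted in the docstring.
def _demo_slogdet_sketch():
    import numpy as np
    a = np.array([[[1.0, 2.0], [3.0, 4.0]], [[2.0, 0.0], [0.0, 3.0]]])
    sign, logdet = np.linalg.slogdet(a)               # shapes (2,) and (2,)
    assert np.allclose(sign * np.exp(logdet), np.linalg.det(a))
    return sign, logdet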
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
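# Hedged sketch (added example): for a full-rank overdetermined system, the
# least-squares solution coincides with the pseudo-inverse solution
# ``x = pinv(a) @ b``. The data points mirror the docstring's line-fit
# example and are otherwise hypothetical.
def _demo_lstsq_sketch():
    import numpy as np
    a = np.vstack([np.arange(4.0), np.ones(4)]).T    # design matrix [x 1]
    b = np.array([-1.0, 0.2, 0.9, 2.1])
    x, residuals, rank, s = np.linalg.lstsq(a, b, rcond=-1)
    assert np.allclose(x, np.linalg.pinv(a).dot(b))
    return x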
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
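# Hedged sketch (added example): reproduces the parenthesization cost
# arithmetic from the docstring for A (10x100), B (100x5), C (5x50), using
# the cost function described there. The shapes come from the docstring's
# own example.
def _demo_multi_dot_cost_sketch():
    def cost(m, n, k):  # scalar multiplications for an (m, n) times (n, k)
        return m * n * k
    cost_ab_c = cost(10, 100, 5) + cost(10, 5, 50)    # (AB)C -> 7500
    cost_a_bc = cost(100, 5, 50) + cost(10, 100, 50)  # A(BC) -> 75000
    assert cost_ab_c == 7500 and cost_a_bc == 75000
    return cost_ab_c, cost_a_bc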
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
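# Hedged sketch (added example): for the docstring's shapes, the chain-order
# table splits the product as (AB)C, i.e. ``s[0, 2] == 1`` with 0-based
# indices. The arrays are hypothetical placeholders with the right shapes.
def _demo_chain_order_sketch():
    import numpy as np
    arrays = [np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))]
    s = _multi_dot_matrix_chain_order(arrays)
    assert s[0, 2] == 1    # split A_0..A_2 after index 1: (A_0 A_1)(A_2)
    return s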
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| bsd-3-clause |
toobaz/pandas | pandas/tests/io/json/test_pandas.py | 2 | 57735 | from collections import OrderedDict
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
ensure_clean,
network,
)
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=["D", "C", "B", "A"])
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
class TestPandasContainer:
@pytest.fixture(scope="function", autouse=True)
def setup(self, datapath):
self.dirpath = datapath("io", "json", "data")
self.ts = tm.makeTimeSeries()
self.ts.name = "ts"
self.series = tm.makeStringSeries()
self.series.name = "series"
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = "objects"
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame()
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
assert_frame_equal(
df, read_json(df.to_json(orient="columns"), orient="columns")
)
assert_frame_equal(df, read_json(df.to_json(orient="index"), orient="index"))
df_unser = read_json(df.to_json(orient="records"), orient="records")
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
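def test_round_trip_orient_split_sketch(self):
    # Hedged sketch (added example, not from the original suite):
    # orient="split" serialises index, columns and data separately, so a
    # round trip through to_json/read_json preserves both axes and dtypes.
    df = DataFrame({"a": [1, 2], "b": [3.0, 4.0]}, index=["x", "y"])
    result = read_json(df.to_json(orient="split"), orient="split")
    assert_frame_equal(result, df)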
def test_frame_non_unique_index(self):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = "DataFrame index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="index")
msg = "DataFrame index must be unique for orient='columns'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="columns")
assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
unser = read_json(df.to_json(orient="records"), orient="records")
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient="values"), orient="values")
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = "DataFrame columns must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="index")
msg = "DataFrame columns must be unique for orient='columns'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="columns")
msg = "DataFrame columns must be unique for orient='records'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="records")
assert_frame_equal(
df, read_json(df.to_json(orient="split"), orient="split", dtype=False)
)
unser = read_json(df.to_json(orient="values"), orient="values")
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "y"])
result = read_json(df.to_json(orient="split"), orient="split")
assert_frame_equal(result, df)
def _check(df):
result = read_json(
df.to_json(orient="split"), orient="split", convert_dates=["x"]
)
assert_frame_equal(result, df)
for o in [
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
]:
_check(DataFrame(o, index=[1, 2], columns=["x", "x"]))
def test_frame_from_json_to_json(self):
def _check_orient(
df,
orient,
dtype=None,
numpy=False,
convert_axes=True,
check_dtype=True,
raise_ok=None,
sort=None,
check_index_type=True,
check_column_type=True,
check_numpy_dtype=False,
):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ["index", "columns"]:
msg = "DataFrame index must be unique for orient='{}'".format(orient)
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
return
if not df.columns.is_unique and orient in ["index", "columns", "records"]:
# TODO: not executed. fix this.
with pytest.raises(ValueError, match="ksjkajksfjksjfkjs"):
df.to_json(orient=orient)
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(
dfjson,
orient=orient,
dtype=dtype,
numpy=numpy,
convert_axes=convert_axes,
)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if not dtype:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(unser.index.values.astype("i8") * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(
df.values, unser.values, check_dtype=check_numpy_dtype
)
tm.assert_index_equal(
df.columns, unser.columns, exact=check_column_type
)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(
df.values, unser.values, check_dtype=check_numpy_dtype
)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(
df.values, unser.values, check_dtype=check_numpy_dtype
)
else:
if convert_axes:
tm.assert_frame_equal(
df,
unser,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type,
)
else:
tm.assert_frame_equal(
df, unser, check_less_precise=False, check_dtype=check_dtype
)
def _check_all_orients(
df,
dtype=None,
convert_axes=True,
raise_ok=None,
sort=None,
check_index_type=True,
check_column_type=True,
):
# numpy=False
if convert_axes:
_check_orient(
df,
"columns",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"records",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"split",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"index",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"values",
dtype=dtype,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(df, "columns", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype, convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype, convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(
df,
"columns",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"records",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"split",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"index",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"values",
dtype=dtype,
numpy=True,
raise_ok=raise_ok,
sort=sort,
check_index_type=False,
check_column_type=False,
)
_check_orient(
df,
"columns",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"records",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"split",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"index",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
_check_orient(
df,
"values",
dtype=dtype,
numpy=True,
convert_axes=False,
raise_ok=raise_ok,
sort=sort,
)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
)
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(
DataFrame(biggie, dtype=np.float64), dtype=np.float64, convert_axes=False
)
_check_all_orients(
DataFrame(biggie, dtype=np.int), dtype=np.int, convert_axes=False
)
_check_all_orients(
DataFrame(biggie, dtype="U3"),
dtype="U3",
convert_axes=False,
raise_ok=ValueError,
)
# categorical
_check_all_orients(self.categorical, sort="sort", raise_ok=ValueError)
# empty
_check_all_orients(
self.empty_frame, check_index_type=False, check_column_type=False
)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
with pytest.raises(ValueError, match="Expected object or value"):
read_json(StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
)
msg = r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)"
with pytest.raises(ValueError, match=msg):
read_json(json, orient="split")
# too many columns
json = StringIO(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
)
msg = "3 columns passed, passed data had 2 columns"
with pytest.raises(ValueError, match=msg):
read_json(json, orient="split")
# bad key
json = StringIO(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}'
)
with pytest.raises(ValueError, match=r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([["1", "2"], ["4", "5", "6"]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser["2"]["0"] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False, convert_axes=False, dtype=False)
assert unser["2"]["0"] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
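def test_inf_raw_payload_is_null_sketch(self):
    # Hedged sketch (added example, not from the original suite): inf has
    # no JSON representation, so to_json writes a literal null; this checks
    # the raw payload with the stdlib json module instead of read_json.
    df = DataFrame({"a": [np.inf]})
    assert json.loads(df.to_json())["a"]["0"] is None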
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":2.0}}'
df = pd.DataFrame([dict(a_float=-1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":-2.0}}'
df = pd.DataFrame([dict(a_float=0.995)])
encoded = df.to_json(double_precision=2)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.9995)])
encoded = df.to_json(double_precision=3)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.99999999999999944)])
encoded = df.to_json(double_precision=15)
assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
v12_json = os.path.join(self.dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(self.dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self):
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
assert df_nonprintable.to_json(
default_handler=str
) == '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
assert df_mixed.to_json(
default_handler=str
) == '{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
df = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]})
assert df.to_json() == '{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
bar=("bar" * 100000)
)
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(
series, orient, dtype=None, numpy=False, check_index_type=True
):
series = series.sort_index()
unser = read_json(
series.to_json(orient=orient),
typ="series",
orient=orient,
numpy=numpy,
dtype=dtype,
)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(
series, unser, check_index_type=check_index_type
)
else:
assert_series_equal(
series,
unser,
check_names=False,
check_index_type=check_index_type,
)
def _check_all_orients(series, dtype=None, check_index_type=True):
    for orient in ("columns", "records", "split", "index"):
        _check_orient(series, orient, dtype=dtype, check_index_type=check_index_type)
    _check_orient(series, "values", dtype=dtype)
    for orient in ("columns", "records", "split", "index", "values"):
        _check_orient(series, orient, dtype=dtype, numpy=True, check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series(
[str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name,
)
_check_all_orients(objSeries, dtype=False)
# empty_series has an empty index with object dtype,
# which cannot be reverted on round-trip
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int64), dtype=np.int64)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False, check_column_type=False)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean("test.json") as path:
for df in [
self.frame,
self.frame2,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ="series")
assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ="series")
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
from pandas.io.json import dumps
infer_words = [
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
]
for infer_word in infer_words:
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
assert_frame_equal(result, expected)
test_w_date("20130101 20:43:42.123")
test_w_date("20130101 20:43:42", date_unit="s")
test_w_date("20130101 20:43:42.123", date_unit="ms")
test_w_date("20130101 20:43:42.123456", date_unit="us")
test_w_date("20130101 20:43:42.123456789", date_unit="ns")
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
assert_series_equal(result, expected)
test_w_date("20130101 20:43:42.123")
test_w_date("20130101 20:43:42", date_unit="s")
test_w_date("20130101 20:43:42.123", date_unit="ms")
test_w_date("20130101 20:43:42.123456", date_unit="us")
test_w_date("20130101 20:43:42.123456789", date_unit="ns")
ts = Series(Timestamp("20130101 20:43:42.123"), index=self.ts.index)
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
ts.to_json(date_format="iso", date_unit="foo")
def test_date_unit(self):
df = self.tsframe.copy()
df["date"] = Timestamp("20130101 20:43:42")
dl = df.columns.get_loc("date")
df.iloc[1, dl] = Timestamp("19710101 20:43:42")
df.iloc[2, dl] = Timestamp("21460101 20:43:42")
df.iloc[4, dl] = pd.NaT
for unit in ("s", "ms", "us", "ns"):
json = df.to_json(date_format="epoch", date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r"""{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}"""
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
dfj2["date"] = Timestamp("20130101")
dfj2["ints"] = range(5)
dfj2["bools"] = True
dfj2.index = pd.date_range("20130101", periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
# compare against the source frame; the default (non-exact) tolerance absorbs
# the double_precision=10 rounding that to_json applies to float columns
assert_frame_equal(result, dfj2)
def test_misc_example(self):
# with numpy=True, parsing records with inconsistent key order fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\['a', 'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with pytest.raises(AssertionError, match=error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
@network
@pytest.mark.single
def test_round_trip_exception_(self):
# GH 3867
csv = "https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv"
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
@network
@pytest.mark.single
@pytest.mark.parametrize(
"field,dtype",
[
["created_at", pd.DatetimeTZDtype(tz="UTC")],
["closed_at", "datetime64[ns]"],
["updated_at", pd.DatetimeTZDtype(tz="UTC")],
],
)
def test_url(self, field, dtype):
url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5" # noqa
result = read_json(url, convert_dates=True)
assert result[field].dtype == dtype
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit="ms")
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
assert s.dtype == "timedelta64[ns]"
result = pd.read_json(s.to_json(), typ="series").apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == "timedelta64[ns]"
assert_frame_equal(frame, pd.read_json(frame.to_json()).apply(converter))
frame = DataFrame(
{
"a": [timedelta(days=23), timedelta(seconds=5)],
"b": [1, 2],
"c": pd.date_range(start="20130101", periods=2),
}
)
result = pd.read_json(frame.to_json(date_unit="ns"))
result["a"] = pd.to_timedelta(result.a, unit="ns")
result["c"] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame(
{"a": [timedelta(23), pd.Timestamp("20130101")]}, dtype=object
)
expected = DataFrame(
{"a": [pd.Timedelta(frame.a[0]).value, pd.Timestamp(frame.a[1]).value]}
)
result = pd.read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({"a": [7, value]})
expected = DataFrame({"a": [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
return str(obj)
df_list = [
9,
DataFrame(
{"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
columns=["a", "b"],
),
]
expected = (
'[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]'
)
assert dumps(df_list, default_handler=default, orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame(
{"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
columns=["a", "b"],
)
expected = (
'[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]'
)
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
msg = "raisin"
def my_handler_raises(obj):
raise TypeError(msg)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, object()]}).to_json(
default_handler=my_handler_raises
)
with pytest.raises(TypeError, match=msg):
DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
default_handler=my_handler_raises
)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype("category")
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
tz_naive = tz_range.tz_convert("utc").tz_localize(None)
df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
df_naive = df.copy()
df_naive["A"] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp("2013-01-10 05:00:00Z")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp("2013-01-10 00:00:00", tz="US/Eastern")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp("2013-01-10 00:00:00-0500")
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = (
'{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}'
)
tz_range = pd.date_range("2013-01-01 05:00:00Z", periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
result = dumps(df, iso_dates=True)
assert result == dfexp
tz_range = pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern")
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range("2013-01-01 00:00:00-0500", periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({"DT": dti})
assert dumps(df, iso_dates=True) == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
@td.skip_if_not_us_locale
def test_read_s3_jsonl(self, s3_resource):
# GH17200
result = read_json("s3n://pandas-test/items.jsonl", lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with ensure_clean("tmp_items.json") as path:
with open(path, "w") as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e21, index=["articleId"])
assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n' '{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# TODO: there is a near-identical test for pytables; can we share?
def test_latin_encoding(self):
# GH 13774
pytest.skip("encoding not implemented in .to_json(), xref #13774")
values = [
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
]
def _try_decode(x, encoding="latin-1"):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ["category", object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding="latin-1"):
with ensure_clean("test.json") as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({"a": [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
@pytest.mark.parametrize(
"index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
)
@pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
def test_from_json_to_json_table_index_and_columns(self, index, columns):
# GH25433 GH25435
expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
assert_frame_equal(result, expected)
def test_from_json_to_json_table_dtypes(self):
# GH21345
expected = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
dfjson = expected.to_json(orient="table")
result = pd.read_json(dfjson, orient="table")
assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
def test_read_json_table_dtype_raises(self, dtype):
# GH21345
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
dfjson = df.to_json(orient="table")
msg = "cannot pass both dtype and orient='table'"
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient="table", dtype=dtype)
def test_read_json_table_convert_axes_raises(self):
# GH25433 GH25435
df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
dfjson = df.to_json(orient="table")
msg = "cannot pass both convert_axes and orient='table'"
with pytest.raises(ValueError, match=msg):
pd.read_json(dfjson, orient="table", convert_axes=True)
@pytest.mark.parametrize(
"data, expected",
[
(
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(
DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(
DataFrame(
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
),
{"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
),
(Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
(
Series([1, 2, 3], name="A").rename_axis("foo"),
{"name": "A", "data": [1, 2, 3]},
),
(
Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
{"name": "A", "data": [1, 2]},
),
],
)
def test_index_false_to_json_split(self, data, expected):
# GH 17394
# Testing index=False in to_json with orient='split'
result = data.to_json(orient="split", index=False)
result = json.loads(result)
assert result == expected
@pytest.mark.parametrize(
"data",
[
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
(DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
(
DataFrame(
[[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
)
),
(Series([1, 2, 3], name="A")),
(Series([1, 2, 3], name="A").rename_axis("foo")),
(Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
],
)
def test_index_false_to_json_table(self, data):
# GH 17394
# Testing index=False in to_json with orient='table'
result = data.to_json(orient="table", index=False)
result = json.loads(result)
expected = {
"schema": pd.io.json.build_table_schema(data, index=False),
"data": DataFrame(data).to_dict(orient="records"),
}
assert result == expected
@pytest.mark.parametrize("orient", ["records", "index", "columns", "values"])
def test_index_false_error_to_json(self, orient):
# GH 17394
# Testing error message from to_json with index=False
df = pd.DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
msg = "'index=False' is only valid when 'orient' is 'split' or 'table'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient, index=False)
@pytest.mark.parametrize("orient", ["split", "table"])
@pytest.mark.parametrize("index", [True, False])
def test_index_false_from_json_to_json(self, orient, index):
# GH25170
# Test index=False in from_json to_json
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
dfjson = expected.to_json(orient=orient, index=index)
result = read_json(dfjson, orient=orient)
assert_frame_equal(result, expected)
def test_read_timezone_information(self):
# GH 25546
result = read_json(
'{"2019-01-01T11:00:00.000Z":88}', typ="series", orient="index"
)
expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
assert_series_equal(result, expected)
| bsd-3-clause |
maxalbert/bokeh | bokeh/properties.py | 1 | 62784 | """ Properties are objects that can be assigned as class level
attributes on Bokeh models, to provide automatic serialization
and validation.
For example, the following defines a model that has integer,
string, and list[float] properties::
class Model(HasProps):
foo = Int
bar = String
baz = List(Float)
The properties of this class can be initialized by specifying
keyword arguments to the initializer::
m = Model(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance::
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception::
>>> m.foo = 2.3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__
super(HasProps, self).__setattr__(name, value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__
raise e
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__
self.validate(value)
File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float
Additionally, properties know how to serialize themselves,
to be understood by BokehJS.
"""
from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
import collections
from copy import copy
import datetime
import dateutil.parser
import difflib
from importlib import import_module
import inspect
import numbers
import re
import types
from warnings import warn
from six import string_types, iteritems
from . import enums
from .util.future import with_metaclass
from .util.string import nice_join
from .property_containers import PropertyValueList, PropertyValueDict, PropertyValueContainer
def field(name):
''' Convenience function to explicitly mark a field specification for
a Bokeh model property.
Args:
name (str) : name of a data source field to reference for a property.
Returns:
dict : `{"field": name}`
Note:
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
'''
return dict(field=name)
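# Hypothetical sketch: field() is just the explicit spelling of the default
# string-as-field-name interpretation, so these two calls are equivalent:
#
#     p.circle(x="x", y="y", size="sizes")
#     p.circle(x="x", y="y", size=field("sizes"))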
def value(val):
''' Convenience function to explicitly mark a value specification for
a Bokeh model property.
Args:
val (any) : a fixed value to specify for a property.
Returns:
dict : `{"value": name}`
Note:
String values for property specifications are by default interpreted
as field names. This function is especially useful when you want to
specify a fixed value with text properties.
Example:
.. code-block:: python
# The following will take text values to render from a data source
# column "text_column", but use a fixed value "12pt" for font size
p.text("x", "y", text="text_column",
text_font_size=value("12pt"), source=source)
'''
return dict(value=val)
bokeh_bool_types = (bool,)
try:
import numpy as np
bokeh_bool_types += (np.bool_,)
except ImportError:
pass
bokeh_integer_types = (numbers.Integral,)
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
pass
class DeserializationError(Exception):
pass
class PropertyFactory(object):
""" Base class for objects that can generate Property instances. """
@classmethod
def autocreate(cls):
""" Called by the metaclass to create a
new instance of this descriptor
if the user just assigned it to a property without trailing
parentheses.
"""
return cls()
def make_properties(self, base_name):
""" Returns a list of Property instances. """
raise NotImplementedError("make_properties not implemented")
class PropertyDescriptor(PropertyFactory):
""" Base class for a description of a property, not associated yet with an attribute name or a class."""
def __init__(self, default=None, help=None, serialized=True):
""" This is how the descriptor is created in the class declaration. """
self._serialized = serialized
self._default = default
self.__doc__ = help
self.alternatives = []
# "fail early" when a default is invalid
self.validate(self._raw_default())
def __str__(self):
return self.__class__.__name__
def make_properties(self, base_name):
return [ BasicProperty(descriptor=self, name=base_name) ]
def _has_stable_default(self):
""" True if we have a default that will be the same every time and is not mutable."""
if isinstance(self._default, types.FunctionType):
return False
else:
return True
@classmethod
def _copy_default(cls, default):
if not isinstance(default, types.FunctionType):
return copy(default)
else:
return default()
def _raw_default(self):
""" The raw_default() needs to be validated and transformed by prepare_value() before
use, and may also be replaced later by subclass overrides or by themes."""
return self._copy_default(self._default)
def themed_default(self, cls, name, theme_overrides):
"""The default transformed by prepare_value() and the theme overrides."""
overrides = theme_overrides
if overrides is None or name not in overrides:
overrides = cls._overridden_defaults()
if name in overrides:
default = self._copy_default(overrides[name])
else:
default = self._raw_default()
return self.prepare_value(cls, name, default)
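# Resolution order used by themed_default() above:
#   1. a value for `name` in the supplied theme overrides, if present;
#   2. otherwise a class-level Override() default from _overridden_defaults();
#   3. otherwise the descriptor's own _raw_default().
# Whichever wins is passed through prepare_value() before being returned.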
@property
def serialized(self):
"""True if the property should be serialized when serializing an object.
This would be False for a "virtual" or "convenience" property that duplicates
information already available in other properties, for example.
"""
return self._serialized
def matches(self, new, old):
# XXX: originally this code warned about not being able to compare values, but that
# doesn't make sense, because most comparisons involving numpy arrays will fail with
# a ValueError exception, so the warning would fire for failures that are inevitable.
try:
if new is None or old is None:
return new is old # XXX: silence FutureWarning from NumPy
else:
return new == old
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
# if we cannot compare (e.g. arrays), just punt and return False for the match
pass
return False
def from_json(self, json, models=None):
""" Convert from JSON-compatible values (list, dict, number, string, bool, None)
into a value for this property."""
return json
def transform(self, value):
"""Change the value into the canonical format for this property."""
return value
def validate(self, value):
"""Check whether we can set this property from this value (called before transform())."""
pass
def is_valid(self, value):
try:
self.validate(value)
except ValueError:
return False
else:
return True
@classmethod
def _wrap_container(cls, value):
if isinstance(value, list):
if isinstance(value, PropertyValueList):
return value
else:
return PropertyValueList(value)
elif isinstance(value, dict):
if isinstance(value, PropertyValueDict):
return value
else:
return PropertyValueDict(value)
else:
return value
def prepare_value(self, cls, name, value):
try:
self.validate(value)
except ValueError as e:
for tp, converter in self.alternatives:
if tp.is_valid(value):
value = converter(value)
break
else:
raise e
else:
value = self.transform(value)
return self._wrap_container(value)
@property
def has_ref(self):
return False
def accepts(self, tp, converter):
tp = ParameterizedPropertyDescriptor._validate_type_param(tp)
self.alternatives.append((tp, converter))
return self
def __or__(self, other):
return Either(self, other)
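# Hypothetical sketch of the conversion hooks above: accepts() registers a
# fallback (type, converter) pair that prepare_value() tries after a failed
# validate(), and __or__ lets descriptors compose into an Either:
#
#     width = Int(default=0).accepts(Float, lambda f: int(f))
#     label = Int() | String()   # equivalent to Either(Int(), String())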
class Property(object):
""" A named attribute that can be read and written. """
def __init__(self, name):
self.name = name
def __str__(self):
return "Property(%s)" % (self.name)
def __get__(self, obj, owner=None):
raise NotImplementedError("Implement __get__")
def __set__(self, obj, value):
raise NotImplementedError("Implement __set__")
def __delete__(self, obj):
raise NotImplementedError("Implement __delete__")
def class_default(self, cls):
""" The default as computed for a certain class, ignoring any per-instance theming."""
raise NotImplementedError("Implement class_default()")
def serializable_value(self, obj):
"""Gets the value as it should be serialized, which differs from
the __get__ value occasionally when we allow the __get__
value to appear simpler for developer convenience.
"""
return self.__get__(obj)
def set_from_json(self, obj, json, models):
"""Sets from a JSON value.
"""
return self.__set__(obj, json)
@property
def serialized(self):
""" True if the property should be serialized when serializing an object.
This would be False for a "virtual" or "convenience" property that duplicates
information already available in other properties, for example.
"""
raise NotImplementedError("Implement serialized()")
@property
def has_ref(self):
""" True if the property can refer to another HasProps instance."""
raise NotImplementedError("Implement has_ref()")
def trigger_if_changed(self, obj, old):
""" Send a change event if the property's value is not equal to ``old``. """
raise NotImplementedError("Implement trigger_if_changed()")
class BasicProperty(Property):
""" A PropertyDescriptor associated with a class attribute name, so it can be read and written. """
def __init__(self, descriptor, name):
super(BasicProperty, self).__init__(name)
self.descriptor = descriptor
self.__doc__ = self.descriptor.__doc__
def __str__(self):
return "%s:%s" % (self.name, self.descriptor)
def class_default(self, cls):
"""Get the default value for a specific subtype of HasProps,
which may not be used for an individual instance."""
return self.descriptor.themed_default(cls, self.name, None)
def instance_default(self, obj):
""" Get the default value that will be used for a specific instance."""
return self.descriptor.themed_default(obj.__class__, self.name, obj.themed_values())
@property
def serialized(self):
return self.descriptor.serialized
def set_from_json(self, obj, json, models=None):
"""Sets using the result of serializable_value().
"""
return super(BasicProperty, self).set_from_json(obj,
self.descriptor.from_json(json, models),
models)
@property
def has_ref(self):
return self.descriptor.has_ref
def _get(self, obj):
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name]
def __get__(self, obj, owner=None):
if obj is not None:
return self._get(obj)
elif owner is not None:
return self
else:
raise ValueError("both 'obj' and 'owner' are None, don't know what to do")
def _trigger(self, obj, old, value):
if hasattr(obj, 'trigger'):
obj.trigger(self.name, old, value)
def _get_default(self, obj):
if self.name in obj._property_values:
# this shouldn't happen because we should have checked before _get_default()
raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
# merely getting a default may force us to put it in
# _property_values if we need to wrap the container, if
# the default is a Model that may change out from
# underneath us, or if the default is generated anew each
# time by a function.
default = self.instance_default(obj)
if not self.descriptor._has_stable_default():
if isinstance(default, PropertyValueContainer):
# this is a special-case so we can avoid returning the container
# as a non-default or application-overridden value, when
# it has not been modified.
default._unmodified_default_value = True
default._register_owner(obj, self)
obj._property_values[self.name] = default
return default
def _real_set(self, obj, old, value):
unchanged = self.descriptor.matches(value, old)
if unchanged:
return
was_set = self.name in obj._property_values
# "old" is the logical old value, but it may not be
# the actual current attribute value if our value
# was mutated behind our back and we got _notify_mutated.
if was_set:
old_attr_value = obj._property_values[self.name]
else:
old_attr_value = old
if old_attr_value is not value:
if isinstance(old_attr_value, PropertyValueContainer):
old_attr_value._unregister_owner(obj, self)
if isinstance(value, PropertyValueContainer):
value._register_owner(obj, self)
obj._property_values[self.name] = value
# for notification purposes, "old" should be the logical old
self._trigger(obj, old, value)
def __set__(self, obj, value):
if not hasattr(obj, '_property_values'):
# Initial values should be passed in to __init__, not set directly
raise RuntimeError("Cannot set a property value '%s' on a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
value = self.descriptor.prepare_value(obj.__class__, self.name, value)
old = self.__get__(obj)
self._real_set(obj, old, value)
# called when a container is mutated "behind our back" and
# we detect it with our collection wrappers. In this case,
# somewhat weirdly, "old" is a copy and the new "value"
# should already be set unless we change it due to
# validation.
def _notify_mutated(self, obj, old):
value = self.__get__(obj)
# re-validate because the contents of 'old' have changed,
# in some cases this could give us a new object for the value
value = self.descriptor.prepare_value(obj.__class__, self.name, value)
self._real_set(obj, old, value)
def __delete__(self, obj):
if self.name in obj._property_values:
del obj._property_values[self.name]
def trigger_if_changed(self, obj, old):
new_value = self.__get__(obj)
if not self.descriptor.matches(old, new_value):
self._trigger(obj, old, new_value)
class Include(PropertyFactory):
""" Include other properties from mixin Models, with a given prefix. """
def __init__(self, delegate, help="", use_prefix=True):
if not (isinstance(delegate, type) and issubclass(delegate, HasProps)):
raise ValueError("expected a subclass of HasProps, got %r" % delegate)
self.delegate = delegate
self.help = help
self.use_prefix = use_prefix
def make_properties(self, base_name):
props = []
delegate = self.delegate
if self.use_prefix:
prefix = re.sub("_props$", "", base_name) + "_"
else:
prefix = ""
# it would be better if we kept the original generators from
# the delegate and built our Include props from those, perhaps.
for subpropname in delegate.properties(with_bases=False):
fullpropname = prefix + subpropname
subprop = delegate.lookup(subpropname)
if isinstance(subprop, BasicProperty):
descriptor = copy(subprop.descriptor)
if "%s" in self.help:
doc = self.help % subpropname.replace('_', ' ')
else:
doc = self.help
descriptor.__doc__ = doc
props += descriptor.make_properties(fullpropname)
return props
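# Hypothetical Include sketch (TextProps stands in for any HasProps mixin):
#
#     class Annotation(HasProps):
#         label_props = Include(TextProps, use_prefix=True)
#
# make_properties("label_props") strips the trailing "_props", so every
# property declared on TextProps is re-emitted under a "label_" prefix.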
class Override(object):
""" Override aspects of the PropertyDescriptor from a superclass. """
def __init__(self, **kwargs):
if len(kwargs) == 0:
raise ValueError("Override() doesn't override anything, needs keyword args")
self.default_overridden = 'default' in kwargs
if self.default_overridden:
self.default = kwargs.pop('default')
if len(kwargs) > 0:
raise ValueError("Unknown keyword args to Override: %r" % (kwargs))
_EXAMPLE_TEMPLATE = """
Example
-------
.. bokeh-plot:: ../%(path)s
:source-position: none
*source:* `%(path)s <https://github.com/bokeh/bokeh/tree/master/%(path)s>`_
"""
class MetaHasProps(type):
def __new__(meta_cls, class_name, bases, class_dict):
names = set()
names_with_refs = set()
container_names = set()
# Now handle all the Override
overridden_defaults = {}
for name, prop in class_dict.items():
if not isinstance(prop, Override):
continue
if prop.default_overridden:
overridden_defaults[name] = prop.default
for name, default in overridden_defaults.items():
del class_dict[name]
generators = dict()
for name, generator in class_dict.items():
if isinstance(generator, PropertyFactory):
generators[name] = generator
elif isinstance(generator, type) and issubclass(generator, PropertyFactory):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
generators[name] = generator.autocreate()
dataspecs = {}
new_class_attrs = {}
def add_prop(prop):
name = prop.name
if name in new_class_attrs:
raise RuntimeError("Two property generators both created %s.%s" % (class_name, name))
new_class_attrs[name] = prop
names.add(name)
if prop.has_ref:
names_with_refs.add(name)
if isinstance(prop, BasicProperty):
if isinstance(prop.descriptor, ContainerProperty):
container_names.add(name)
if isinstance(prop.descriptor, DataSpec):
dataspecs[name] = prop
for name, generator in generators.items():
props = generator.make_properties(name)
replaced_self = False
for prop in props:
if prop.name in generators:
if generators[prop.name] is generator:
# a generator can replace itself, this is the
# standard case like `foo = Int()`
replaced_self = True
add_prop(prop)
else:
# if a generator tries to overwrite another
# generator that's been explicitly provided,
# use the prop that was manually provided
# and ignore this one.
pass
else:
add_prop(prop)
# if we won't overwrite ourselves anyway, delete the generator
if not replaced_self:
del class_dict[name]
class_dict.update(new_class_attrs)
class_dict["__properties__"] = names
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if len(overridden_defaults) > 0:
class_dict["__overridden_defaults__"] = overridden_defaults
if dataspecs:
class_dict["__dataspecs__"] = dataspecs
if "__example__" in class_dict:
path = class_dict["__example__"]
class_dict["__doc__"] += _EXAMPLE_TEMPLATE % dict(path=path)
return super(MetaHasProps, meta_cls).__new__(meta_cls, class_name, bases, class_dict)
def __init__(cls, class_name, bases, nmspc):
if class_name == 'HasProps':
return
# Check for improperly overriding a Property attribute.
# Overriding makes no sense except through the Override
# class which can be used to tweak the default.
# Historically code also tried changing the Property's
# type or changing from Property to non-Property: these
# overrides are bad conceptually because the type of a
# read-write property is invariant.
cls_attrs = cls.__dict__.keys() # we do NOT want inherited attrs here
for attr in cls_attrs:
for base in bases:
if issubclass(base, HasProps) and attr in base.properties():
warn(('Property "%s" in class %s was overridden by a class attribute ' + \
'"%s" in class %s; it never makes sense to do this. ' + \
'Either %s.%s or %s.%s should be removed, or %s.%s should not ' + \
'be a Property, or use Override(), depending on the intended effect.') %
(attr, base.__name__, attr, class_name,
base.__name__, attr,
class_name, attr,
base.__name__, attr),
RuntimeWarning, stacklevel=2)
if "__overridden_defaults__" in cls.__dict__:
our_props = cls.properties()
for key in cls.__dict__["__overridden_defaults__"].keys():
if key not in our_props:
warn(('Override() of %s in class %s does not override anything.') % (key, class_name),
RuntimeWarning, stacklevel=2)
def accumulate_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename]
def accumulate_dict_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
d = dict()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
for k,v in base.items():
if k not in d:
d[k] = v
setattr(cls, cachename, d)
return cls.__dict__[cachename]
def abstract(cls):
""" A phony decorator to mark abstract base classes. """
if not issubclass(cls, HasProps):
raise TypeError("%s is not a subclass of HasProps" % cls.__name__)
return cls
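# Usage sketch: @abstract is purely a marker (plus a subclass check); it adds
# no runtime enforcement of abstractness.
#
#     @abstract
#     class BaseThing(HasProps):
#         name = String()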
class HasProps(with_metaclass(MetaHasProps, object)):
def __init__(self, **properties):
super(HasProps, self).__init__()
self._property_values = dict()
for name, value in properties.items():
setattr(self, name, value)
def __setattr__(self, name, value):
# self.properties() below can be expensive so avoid it
# if we're just setting a private underscore field
if name.startswith("_"):
super(HasProps, self).__setattr__(name, value)
return
props = sorted(self.properties())
deprecated = getattr(self, '__deprecated_attributes__', [])
if name in props or name in deprecated:
super(HasProps, self).__setattr__(name, value)
else:
matches, text = difflib.get_close_matches(name.lower(), props), "similar"
if not matches:
matches, text = props, "possible"
raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" %
(name, self.__class__.__name__, text, nice_join(matches)))
def set_from_json(self, name, json, models=None):
"""Sets a property of the object using JSON and a dictionary from model ids to model instances.
The model instances are necessary if the JSON contains references to models.
"""
if name in self.properties():
#logger.debug("Patching attribute %s of %r", attr, patched_obj)
prop = self.lookup(name)
prop.set_from_json(self, json, models)
else:
logger.warn("JSON had attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent", name, self)
def update(self, **kwargs):
""" Updates the object's properties from the given keyword args."""
for k,v in kwargs.items():
setattr(self, k, v)
def update_from_json(self, json_attributes, models=None):
""" Updates the object's properties from a JSON attributes dictionary."""
for k, v in json_attributes.items():
self.set_from_json(k, v, models)
def _clone(self):
""" Returns a duplicate of this object with all its properties
set appropriately. Values which are containers are shallow-copied.
"""
return self.__class__(**self._property_values)
@classmethod
def lookup(cls, name):
return getattr(cls, name)
@classmethod
def properties_with_refs(cls):
""" Returns a set of the names of this object's properties that
have references. We traverse the class hierarchy and
pull together the full list of properties.
"""
return accumulate_from_superclasses(cls, "__properties_with_refs__")
@classmethod
def properties_containers(cls):
""" Returns a list of properties that are containers.
"""
return accumulate_from_superclasses(cls, "__container_props__")
@classmethod
def properties(cls, with_bases=True):
"""Returns a set of the names of this object's properties. If
``with_bases`` is True, we traverse the class hierarchy
and pull together the full list of properties; if False,
we only return the properties introduced in the class
itself.
Args:
with_bases (bool, optional) : True to traverse the class hierarchy and
include inherited properties; False to include only properties defined
on this class itself. (default: True)
Returns:
a set of property names
"""
if with_bases:
return accumulate_from_superclasses(cls, "__properties__")
else:
return set(cls.__properties__)
@classmethod
def _overridden_defaults(cls):
""" Returns a dictionary of defaults that have been overridden; this is an implementation detail of PropertyDescriptor. """
return accumulate_dict_from_superclasses(cls, "__overridden_defaults__")
@classmethod
def dataspecs(cls):
""" Returns a set of the names of this object's dataspecs (and
dataspec subclasses). Traverses the class hierarchy.
"""
return set(cls.dataspecs_with_props().keys())
@classmethod
def dataspecs_with_props(cls):
""" Returns a dict of dataspec names to dataspec properties. """
return accumulate_dict_from_superclasses(cls, "__dataspecs__")
def properties_with_values(self, include_defaults=True):
''' Get a dict from property names to the current values of those properties.
Non-serializable properties are skipped and property values are in "serialized"
format which may be slightly different from the values you would normally
read from the properties; the intent of this method is to return the information
needed to losslessly reconstitute the object instance.
Args:
include_defaults (bool) : True to include properties that haven't been set.
Returns:
dict : from property names to their values
'''
result = dict()
if include_defaults:
keys = self.properties()
else:
keys = self._property_values.keys()
for key in keys:
prop = self.lookup(key)
if not prop.serialized:
continue
value = prop.serializable_value(self)
if not include_defaults:
if isinstance(value, PropertyValueContainer) and value._unmodified_default_value:
continue
result[key] = value
return result
def set(self, **kwargs):
""" Sets a number of properties at once """
for kw in kwargs:
setattr(self, kw, kwargs[kw])
def themed_values(self):
""" Get any theme-provided overrides as a dict from property name to value,
or None if no theme overrides any values for this instance. """
if hasattr(self, '__themed_values__'):
return getattr(self, '__themed_values__')
else:
return None
def apply_theme(self, property_values):
""" Apply a set of theme values which will be used rather than
defaults, but will not override application-set
values. The passed-in dictionary may be kept around as-is
and shared with other instances to save memory (so neither
the caller nor the HasProps instance should modify it).
"""
old_dict = None
if hasattr(self, '__themed_values__'):
old_dict = getattr(self, '__themed_values__')
# if the same theme is set again, it should reuse the
# same dict
if old_dict is property_values:
return
removed = set()
# we're doing a little song-and-dance to avoid storing __themed_values__ or
# an empty dict, if there's no theme that applies to this HasProps instance.
if old_dict is not None:
removed.update(set(old_dict.keys()))
added = set(property_values.keys())
old_values = dict()
for k in added.union(removed):
old_values[k] = getattr(self, k)
if len(property_values) > 0:
setattr(self, '__themed_values__', property_values)
elif hasattr(self, '__themed_values__'):
delattr(self, '__themed_values__')
# Emit any change notifications that result
for k, v in old_values.items():
prop = self.lookup(k)
prop.trigger_if_changed(self, v)
def unapply_theme(self):
self.apply_theme(property_values=dict())
def pprint_props(self, indent=0):
""" Prints the properties of this object, nicely formatted """
for key, value in self.properties_with_values().items():
print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(PropertyDescriptor):
""" A base class for simple property types. Subclasses should
define a class attribute ``_underlying_type`` that is a tuple
of acceptable type values for the property.
"""
_underlying_type = None
def validate(self, value):
super(PrimitiveProperty, self).validate(value)
if not (value is None or isinstance(value, self._underlying_type)):
raise ValueError("expected a value of type %s, got %s of type %s" %
(nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
def from_json(self, json, models=None):
if json is None or isinstance(json, self._underlying_type):
return json
else:
expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
raise DeserializationError("%s expected %s, got %s" % (self, expected, json))
class Bool(PrimitiveProperty):
""" Boolean type property. """
_underlying_type = bokeh_bool_types
class Int(PrimitiveProperty):
""" Signed integer type property. """
_underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
""" Floating point type property. """
_underlying_type = (numbers.Real,)
class Complex(PrimitiveProperty):
""" Complex floating point type property. """
_underlying_type = (numbers.Complex,)
class String(PrimitiveProperty):
""" String type property. """
_underlying_type = string_types
class Regex(String):
""" Regex type property validates that text values match the
given regular expression.
"""
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super(Regex, self).__init__(default=default, help=help)
def validate(self, value):
super(Regex, self).validate(value)
if not (value is None or self.regex.match(value) is not None):
raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class JSON(String):
""" JSON type property validates that text values are valid JSON.
.. note::
The string is transmitted and received by BokehJS as a *string*
containing JSON content. i.e., you must use ``JSON.parse`` to unpack
the value into a JavaScript hash.
"""
def validate(self, value):
super(JSON, self).validate(value)
if value is None: return
try:
import json
json.loads(value)
except ValueError:
raise ValueError("expected JSON text, got %r" % value)
class ParameterizedPropertyDescriptor(PropertyDescriptor):
""" Base class for Properties that have type parameters, e.g.
``List(String)``.
"""
@staticmethod
def _validate_type_param(type_param):
if isinstance(type_param, type):
if issubclass(type_param, PropertyDescriptor):
return type_param()
else:
type_param = type_param.__name__
elif isinstance(type_param, PropertyDescriptor):
return type_param
raise ValueError("expected a PropertyDescriptor as type parameter, got %s" % type_param)
@property
def type_params(self):
raise NotImplementedError("abstract method")
@property
def has_ref(self):
return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedPropertyDescriptor):
""" Base class for Container-like type properties. """
def _has_stable_default(self):
# all containers are mutable, so the default can be modified
return False
class Seq(ContainerProperty):
""" Sequence (list, tuple) type property.
"""
def _is_seq(self, value):
return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping)
def _new_instance(self, value):
return value
def __init__(self, item_type, default=None, help=None):
self.item_type = self._validate_type_param(item_type)
super(Seq, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.item_type]
def validate(self, value):
super(Seq, self).validate(value)
if value is not None:
if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)):
if self._is_seq(value):
invalid = []
for item in value:
if not self.item_type.is_valid(item):
invalid.append(item)
raise ValueError("expected an element of %s, got seq with invalid items %r" % (self, invalid))
else:
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.item_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return self._new_instance([ self.item_type.from_json(item, models) for item in json ])
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class List(Seq):
""" Python list type property.
"""
def __init__(self, item_type, default=[], help=None):
# todo: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
# optional values. Also in Dict.
super(List, self).__init__(item_type, default=default, help=help)
def _is_seq(self, value):
return isinstance(value, list)
class Array(Seq):
""" NumPy array type property.
"""
def _is_seq(self, value):
import numpy as np
return isinstance(value, np.ndarray)
def _new_instance(self, value):
import numpy as np
return np.array(value)
class Dict(ContainerProperty):
""" Python dict type property.
If a default value is passed in, then a shallow copy of it will be
used for each new use of this property.
"""
def __init__(self, keys_type, values_type, default={}, help=None):
self.keys_type = self._validate_type_param(keys_type)
self.values_type = self._validate_type_param(values_type)
super(Dict, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.keys_type, self.values_type]
def validate(self, value):
super(Dict, self).validate(value)
if value is not None:
if not (isinstance(value, dict) and \
all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) }
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
class Tuple(ContainerProperty):
""" Tuple type property. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help"))
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Tuple, self).validate(value)
if value is not None:
if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
raise ValueError("expected an element of %s, got %r" % (self, value))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, list):
return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json))
else:
raise DeserializationError("%s expected a list or None, got %s" % (self, json))
class Instance(PropertyDescriptor):
""" Instance type property, for references to other Models in the object
graph.
"""
def __init__(self, instance_type, default=None, help=None):
if not isinstance(instance_type, (type,) + string_types):
raise ValueError("expected a type or string, got %s" % instance_type)
if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
self._instance_type = instance_type
super(Instance, self).__init__(default=default, help=help)
@property
def instance_type(self):
if isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
self._instance_type = getattr(import_module(module, "bokeh"), name)
return self._instance_type
def _has_stable_default(self):
# because the instance value is mutable
return False
@property
def has_ref(self):
return True
def validate(self, value):
super(Instance, self).validate(value)
if value is not None:
if not isinstance(value, self.instance_type):
raise ValueError("expected an instance of type %s, got %s of type %s" %
(self.instance_type.__name__, value, type(value).__name__))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
def from_json(self, json, models=None):
if json is None:
return None
elif isinstance(json, dict):
from .model import Model
if issubclass(self.instance_type, Model):
if models is None:
raise DeserializationError("%s can't deserialize without models" % self)
else:
model = models.get(json["id"])
if model is not None:
return model
else:
raise DeserializationError("%s failed to deserialize reference to %s" % (self, json))
else:
attrs = {}
for name, value in iteritems(json):
prop = self.instance_type.lookup(name)
attrs[name] = prop.from_json(value, models)
# XXX: this doesn't work when Instance(Superclass) := Subclass()
# Serialization dict must carry type information to resolve this.
return self.instance_type(**attrs)
else:
raise DeserializationError("%s expected a dict or None, got %s" % (self, json))
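# --- Editor's illustration (not part of the original module) ---
# Instance.instance_type above resolves a dotted string to a class
# lazily, on first access. A standalone sketch of that lookup (the
# original additionally passes "bokeh" as the package argument for
# relative imports); `resolve` is a hypothetical name.
from importlib import import_module

def resolve(dotted_name):
    module, name = dotted_name.rsplit(".", 1)
    return getattr(import_module(module), name)

assert resolve("collections.OrderedDict").__name__ == "OrderedDict"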
class This(PropertyDescriptor):
""" A reference to an instance of the class being defined. """
pass
# Fake types, ABCs
class Any(PropertyDescriptor):
""" Any type property accepts any values. """
pass
class Function(PropertyDescriptor):
""" Function type property. """
pass
class Event(PropertyDescriptor):
""" Event type property. """
pass
class Interval(ParameterizedPropertyDescriptor):
''' Range type property ensures values are contained inside a given interval. '''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
# validate the interval endpoints against the parameterized type
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
@property
def type_params(self):
return [self.interval_type]
def validate(self, value):
super(Interval, self).validate(value)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value))
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
class Byte(Interval):
''' Byte type property. '''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Either(ParameterizedPropertyDescriptor):
""" Takes a list of valid properties and validates against them in succession. """
def __init__(self, tp1, tp2, *type_params, **kwargs):
self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
help = kwargs.get("help")
def choose_default():
return self._type_params[0]._raw_default()
default = kwargs.get("default", choose_default)
super(Either, self).__init__(default=default, help=help)
@property
def type_params(self):
return self._type_params
def validate(self, value):
super(Either, self).validate(value)
if not (value is None or any(param.is_valid(value) for param in self.type_params)):
raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
def transform(self, value):
for param in self.type_params:
try:
return param.transform(value)
except ValueError:
pass
raise ValueError("Could not transform %r" % value)
def from_json(self, json, models=None):
for tp in self.type_params:
try:
return tp.from_json(json, models)
except DeserializationError:
pass
else:
raise DeserializationError("%s couldn't deserialize %s" % (self, json))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
def __or__(self, other):
return self.__class__(*(self.type_params + [other]), default=self._default, help=self.help)
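# --- Editor's illustration (not part of the original module) ---
# Either.transform above applies a "first converter that accepts wins"
# rule. The same idea with plain callables; `transform_first` is a
# hypothetical helper.
def transform_first(value, converters):
    for convert in converters:
        try:
            return convert(value)
        except ValueError:
            pass
    raise ValueError("Could not transform %r" % value)

assert transform_first("42", (int, float)) == 42
assert transform_first("4.5", (int, float)) == 4.5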
class Enum(String):
""" An Enum with a list of allowed values. The first value in the list is
the default value, unless a default is provided with the "default" keyword
argument.
"""
def __init__(self, enum, *values, **kwargs):
if values or not isinstance(enum, enums.Enumeration):
enum = enums.enumeration(enum, *values)
self._enum = enum
default = kwargs.get("default", enum._default)
help = kwargs.get("help")
super(Enum, self).__init__(default=default, help=help)
@property
def allowed_values(self):
return self._enum._values
def validate(self, value):
super(Enum, self).validate(value)
if not (value is None or value in self._enum):
raise ValueError("invalid value: %r; allowed values are %s" % (value, nice_join(self.allowed_values)))
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values)))
class Auto(Enum):
def __init__(self):
super(Auto, self).__init__("auto")
def __str__(self):
return self.__class__.__name__
# Properties useful for defining visual attributes
class Color(Either):
""" Accepts color definition in a variety of ways, and produces an
appropriate serialization of its value for whatever backend.
For colors, because we support named colors and hex values prefaced
with a "#", when we are handed a string value, there is a little
interpretation: if the value is one of the 147 SVG named colors or
it starts with a "#", then it is interpreted as a value.
If a 3-tuple is provided, then it is treated as an RGB (0..255).
If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
"""
def __init__(self, default=None, help=None):
types = (Enum(enums.NamedColor),
Regex("^#[0-9a-fA-F]{6}$"),
Tuple(Byte, Byte, Byte),
Tuple(Byte, Byte, Byte, Percent))
super(Color, self).__init__(*types, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class Align(PropertyDescriptor):
pass
class DashPattern(Either):
""" Dash type property.
Express patterns that describe line dashes. ``DashPattern`` values
can be specified in a variety of ways:
* An enum: "solid", "dashed", "dotted", "dotdash", "dashdot"
* a tuple or list of integers in the `HTML5 Canvas dash specification style`_.
Note that if the list of integers has an odd number of elements, then
it is duplicated, and that duplicated list becomes the new dash list.
To indicate that dashing is turned off (solid lines), specify the empty
list [].
.. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
"""
_dash_patterns = {
"solid": [],
"dashed": [6],
"dotted": [2,4],
"dotdash": [2,4,6,4],
"dashdot": [6,4,2,4],
}
def __init__(self, default=[], help=None):
types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int)
super(DashPattern, self).__init__(*types, default=default, help=help)
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
def __str__(self):
return self.__class__.__name__
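# --- Editor's illustration (not part of the original module) ---
# DashPattern.transform above maps enum names to canned dash lists and
# parses digit strings into integer lists. A standalone sketch with a
# truncated (hypothetical) pattern table.
_DASHES = {"solid": [], "dashed": [6], "dotted": [2, 4]}

def dash_list(value):
    if isinstance(value, str):
        try:
            return _DASHES[value]
        except KeyError:
            return [int(x) for x in value.split()]
    return value

assert dash_list("dashed") == [6]
assert dash_list("2 4 6") == [2, 4, 6]
assert dash_list([8, 2]) == [8, 2]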
class Size(Float):
""" Size type property.
.. note::
``Size`` accepts non-negative numeric values only.
"""
def validate(self, value):
super(Size, self).validate(value)
if not (value is None or 0.0 <= value):
raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
""" Percentage type property.
Percents are useful for specifying alphas and coverage and extents; more
semantically meaningful than Float(0..1).
"""
def validate(self, value):
super(Percent, self).validate(value)
if not (value is None or 0.0 <= value <= 1.0):
raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
""" Angle type property. """
pass
class Date(PropertyDescriptor):
""" Date (not datetime) type property.
"""
# NOTE: the default is evaluated once, at class-definition (import) time
def __init__(self, default=datetime.date.today(), help=None):
super(Date, self).__init__(default=default, help=help)
def validate(self, value):
super(Date, self).validate(value)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
raise ValueError("expected a date, string or timestamp, got %r" % value)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
try:
value = datetime.date.fromtimestamp(value)
except ValueError:
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
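# --- Editor's illustration (not part of the original module) ---
# Date.transform above retries an out-of-range timestamp as milliseconds.
# A standalone sketch; `timestamp_to_date` is hypothetical, and depending
# on the platform the failure may also surface as OverflowError/OSError,
# which the original does not catch.
import datetime

def timestamp_to_date(ts):
    try:
        return datetime.date.fromtimestamp(ts)
    except (ValueError, OverflowError, OSError):
        return datetime.date.fromtimestamp(ts / 1000)

# 1.5e12 is a plausible millisecond timestamp (mid-2017), but is far out
# of range when read as seconds:
assert timestamp_to_date(1.5e12) == timestamp_to_date(1.5e9)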
class Datetime(PropertyDescriptor):
""" Datetime type property.
"""
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def validate(self, value):
super(Datetime, self).validate(value)
datetime_types = (datetime.datetime, datetime.date)
try:
import numpy as np
datetime_types += (np.datetime64,)
except ImportError:
pass
if isinstance(value, datetime_types):
return
try:
import pandas
if isinstance(value, pandas.Timestamp):
return
except ImportError:
pass
raise ValueError("Expected a datetime instance, got %r" % value)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
""" RelativeDelta type property for time deltas.
"""
def __init__(self, default={}, help=None):
keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
values = Int
super(RelativeDelta, self).__init__(keys, values, default=default, help=help)
def __str__(self):
return self.__class__.__name__
class DataSpecProperty(BasicProperty):
""" A Property with a DataSpec descriptor."""
def serializable_value(self, obj):
return self.descriptor.to_serializable(obj, self.name, getattr(obj, self.name))
def set_from_json(self, obj, json, models=None):
# we want to try to keep the "format" of the data spec as string, dict, or number,
# assuming the serialized dict is compatible with that.
old = getattr(obj, self.name)
if old is not None:
try:
self.descriptor._type.validate(old)
if 'value' in json:
json = json['value']
except ValueError:
if isinstance(old, string_types) and 'field' in json:
json = json['field']
# leave it as a dict if 'old' was a dict
super(DataSpecProperty, self).set_from_json(obj, json, models)
class DataSpec(Either):
def __init__(self, typ, default, help=None):
super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help)
self._type = self._validate_type_param(typ)
def make_properties(self, base_name):
return [ DataSpecProperty(descriptor=self, name=base_name) ]
def to_serializable(self, obj, name, val):
# Check for None value; this means "the whole thing is
# unset," not "the value is None."
if val is None:
return None
# Check for spec type value
try:
self._type.validate(val)
return dict(value=val)
except ValueError:
pass
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
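# --- Editor's illustration (not part of the original module) ---
# The decision order in DataSpec.to_serializable above: None means "the
# whole spec is unset", a value of the spec's type becomes {"value": ...},
# and any other string is assumed to name a data source field.
# `spec_to_json` and `is_literal` are hypothetical stand-ins for the
# property machinery.
def spec_to_json(val, is_literal):
    if val is None:
        return None
    if is_literal(val):
        return dict(value=val)
    if isinstance(val, str):
        return dict(field=val)
    return val

assert spec_to_json(3.0, lambda v: isinstance(v, (int, float))) == {"value": 3.0}
assert spec_to_json("xcol", lambda v: isinstance(v, (int, float))) == {"field": "xcol"}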
class NumberSpec(DataSpec):
def __init__(self, default=None, help=None):
super(NumberSpec, self).__init__(Float, default=default, help=help)
class StringSpec(DataSpec):
def __init__(self, default, help=None):
super(StringSpec, self).__init__(List(String), default=default, help=help)
def prepare_value(self, cls, name, value):
if isinstance(value, list):
if len(value) != 1:
raise TypeError("StringSpec convenience list values must have length 1")
value = dict(value=value[0])
return super(StringSpec, self).prepare_value(cls, name, value)
class FontSizeSpec(DataSpec):
def __init__(self, default, help=None):
super(FontSizeSpec, self).__init__(List(String), default=default, help=help)
def prepare_value(self, cls, name, value):
if isinstance(value, string_types):
warn('Setting a fixed font size value as a string %r is deprecated, '
'set with value(%r) or [%r] instead' % (value, value, value),
DeprecationWarning, stacklevel=2)
if len(value) > 0 and value[0].isdigit():
value = dict(value=value)
return super(FontSizeSpec, self).prepare_value(cls, name, value)
class UnitsSpecProperty(DataSpecProperty):
""" A Property that sets a matching `_units` property as a side effect."""
def __init__(self, descriptor, name, units_prop):
super(UnitsSpecProperty, self).__init__(descriptor, name)
self.units_prop = units_prop
def _extract_units(self, obj, value):
if isinstance(value, dict):
units = value.pop("units", None)
if units:
self.units_prop.__set__(obj, units)
def __set__(self, obj, value):
self._extract_units(obj, value)
super(UnitsSpecProperty, self).__set__(obj, value)
def set_from_json(self, obj, json, models=None):
self._extract_units(obj, json)
super(UnitsSpecProperty, self).set_from_json(obj, json, models)
class UnitsSpec(NumberSpec):
def __init__(self, default, units_type, units_default, help=None):
super(UnitsSpec, self).__init__(default=default, help=help)
self._units_type = self._validate_type_param(units_type)
# this is a hack because we already constructed units_type
self._units_type.validate(units_default)
self._units_type._default = units_default
# this is sort of a hack because we don't have a
# serialized= kwarg on every PropertyDescriptor subtype
self._units_type._serialized = False
def make_properties(self, base_name):
units_name = base_name + "_units"
units_props = self._units_type.make_properties(units_name)
return units_props + [ UnitsSpecProperty(descriptor=self, name=base_name, units_prop=units_props[0]) ]
def to_serializable(self, obj, name, val):
d = super(UnitsSpec, self).to_serializable(obj, name, val)
if d is not None and 'units' not in d:
d["units"] = getattr(obj, name+"_units")
return d
def __str__(self):
return "%s(units_default=%r)" % (self.__class__.__name__, self._units_type._default)
class AngleSpec(UnitsSpec):
def __init__(self, default=None, units_default="rad", help=None):
super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), units_default=units_default, help=help)
class DistanceSpec(UnitsSpec):
def __init__(self, default=None, units_default="data", help=None):
super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help)
def prepare_value(self, cls, name, value):
try:
if value is not None and value < 0:
raise ValueError("Distances must be positive or None!")
except TypeError:
pass
return super(DistanceSpec, self).prepare_value(cls, name, value)
class ScreenDistanceSpec(NumberSpec):
def to_serializable(self, obj, name, val):
d = super(ScreenDistanceSpec, self).to_serializable(obj, name, val)
d["units"] = "screen"
return d
def prepare_value(self, cls, name, value):
try:
if value is not None and value < 0:
raise ValueError("Distances must be positive or None!")
except TypeError:
pass
return super(ScreenDistanceSpec, self).prepare_value(cls, name, value)
class DataDistanceSpec(NumberSpec):
def to_serializable(self, obj, name, val):
d = super(DataDistanceSpec, self).to_serializable(obj, name, val)
d["units"] = "data"
return d
def prepare_value(self, cls, name, value):
try:
if value is not None and value < 0:
raise ValueError("Distances must be positive or None!")
except TypeError:
pass
return super(DataDistanceSpec, self).prepare_value(cls, name, value)
class ColorSpec(DataSpec):
def __init__(self, default, help=None):
super(ColorSpec, self).__init__(Color, default=default, help=help)
@classmethod
def isconst(cls, arg):
""" Returns True if the argument is a literal color. Check for a
well-formed hexadecimal color value.
"""
return isinstance(arg, string_types) and \
((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor)
@classmethod
def is_color_tuple(cls, val):
return isinstance(val, tuple) and len(val) in (3, 4)
@classmethod
def format_tuple(cls, colortuple):
if len(colortuple) == 3:
return "rgb%r" % (colortuple,)
else:
return "rgba%r" % (colortuple,)
def to_serializable(self, obj, name, val):
if val is None:
return dict(value=None)
# Check for hexadecimal or named color
if self.isconst(val):
return dict(value=val)
# Check for RGB or RGBa tuple
if isinstance(val, tuple):
return dict(value=self.format_tuple(val))
# Check for data source field name
if isinstance(val, string_types):
return dict(field=val)
# Must be dict, return as-is
return val
def validate(self, value):
try:
return super(ColorSpec, self).validate(value)
except ValueError as e:
# Check for tuple input if not yet a valid input type
if self.is_color_tuple(value):
return True
else:
raise e
def transform(self, value):
# Make sure that any tuple has either three integers, or three integers and one float
if isinstance(value, tuple):
value = tuple(int(v) if i < 3 else v for i, v in enumerate(value))
return value
| bsd-3-clause |
Evfro/polara | polara/recommender/models.py | 1 | 47667 | from functools import wraps
from collections import namedtuple
import warnings
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import pandas as pd
import numpy as np
import scipy as sp
from scipy.sparse import coo_matrix, csr_matrix
from scipy.sparse.linalg import svds
from polara.recommender import defaults
from polara.recommender.evaluation import get_hits, get_relevance_scores, get_ranking_scores, get_experience_scores
from polara.recommender.evaluation import get_hr_score, get_rr_scores
from polara.recommender.evaluation import assemble_scoring_matrices
from polara.recommender.evaluation import matrix_from_observations
from polara.recommender.utils import array_split
from polara.lib.optimize import simple_pmf_sgd
from polara.lib.tensor import hooi
from polara.preprocessing.matrices import rescale_matrix
from polara.lib.sampler import mf_random_item_scoring
from polara.lib.sparse import sparse_dot, inverse_permutation
from polara.lib.sparse import inner_product_at
from polara.lib.sparse import unfold_tensor_coordinates, tensor_outer_at
from polara.tools.timing import track_time
def get_default(name):
return defaults.get_config([name])[name]
def clean_build_decorator(build_func):
# this ensures that every time the build function is called,
# all cached recommendations are cleared
@wraps(build_func)
def wrapper(self, *args, **kwargs):
self._is_ready = False
self._recommendations = None
build_res = build_func(self, *args, **kwargs)
self._is_ready = True
return build_res
return wrapper
def with_metaclass(mcls):
# this is used to ensure python 2/3 interoperability, taken from:
# https://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
class MetaModel(type):
# performs cleaning of the instance when build method is called
# propagates the action to any subclasses, key idea is borrowed from here:
# https://stackoverflow.com/questions/18858759/python-decorating-a-class-method-that-is-intended-to-be-overwritten-when-inheri
def __new__(mcs, name, bases, clsdict):
cls = super(MetaModel, mcs).__new__(mcs, name, bases, clsdict)
if 'build' in clsdict:
setattr(cls, 'build', clean_build_decorator(clsdict['build']))
return cls
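# --- Editor's illustration (not part of the original module) ---
# How MetaModel above transparently wraps `build` in every subclass that
# defines one. A self-contained sketch with hypothetical names (_Meta,
# _Model); the real decorator also clears cached recommendations.
class _Meta(type):
    def __new__(mcs, name, bases, clsdict):
        if 'build' in clsdict:
            orig_build = clsdict['build']
            def build(self, *args, **kwargs):
                self._is_ready = False      # invalidate before rebuilding
                result = orig_build(self, *args, **kwargs)
                self._is_ready = True
                return result
            clsdict['build'] = build
        return super().__new__(mcs, name, bases, clsdict)

class _Model(metaclass=_Meta):
    _is_ready = False
    def build(self):
        return 'built'

_m = _Model()
assert _m.build() == 'built' and _m._is_ready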
@with_metaclass(MetaModel)
class RecommenderModel(object):
_config = ('topk', 'filter_seen', 'switch_positive', 'feedback_threshold', 'verify_integrity')
_pad_const = -1 # used for sparse data
def __init__(self, recommender_data, feedback_threshold=None):
self.data = recommender_data
self._recommendations = None
self.method = 'ABC'
self._topk = get_default('topk')
self._filter_seen = get_default('filter_seen')
self._feedback_threshold = feedback_threshold or get_default('feedback_threshold')
self.switch_positive = get_default('switch_positive')
self.verify_integrity = get_default('verify_integrity')
self.max_test_workers = get_default('max_test_workers')
# TODO sorting in data must be by self._prediction_key, also need to change get_test_data
self._prediction_key = self.data.fields.userid
self._prediction_target = self.data.fields.itemid
self._is_ready = False
self.verbose = True
self.training_time = [] # setting to None will prevent storing time
self.data.subscribe(self.data.on_change_event, self._renew_model)
self.data.subscribe(self.data.on_update_event, self._refresh_model)
@property
def recommendations(self):
if self._recommendations is None:
if not self._is_ready:
if self.verbose:
print('{} model is not ready. Rebuilding.'.format(self.method))
self.build()
self._recommendations = self.get_recommendations()
return self._recommendations
def _renew_model(self):
self._recommendations = None
self._is_ready = False
def _refresh_model(self):
self._recommendations = None
@property
def topk(self):
return self._topk
@topk.setter
def topk(self, new_value):
# support rolling back scenario for @k calculations
if (self._recommendations is not None) and (new_value > self._recommendations.shape[1]):
self._recommendations = None # if topk is too high - recalculate recommendations
self._topk = new_value
@property
def feedback_threshold(self):
return self._feedback_threshold
@feedback_threshold.setter
def feedback_threshold(self, new_value):
if self._feedback_threshold != new_value:
self._feedback_threshold = new_value
self._renew_model()
@property
def filter_seen(self):
return self._filter_seen
@filter_seen.setter
def filter_seen(self, new_value):
if self._filter_seen != new_value:
self._filter_seen = new_value
self._refresh_model()
def get_base_configuration(self):
config = {attr: getattr(self, attr) for attr in self._config}
return config
def build(self):
raise NotImplementedError('This must be implemented in subclasses')
def get_training_matrix(self, feedback_threshold=None, ignore_feedback=False,
sparse_format='csr', dtype=None):
threshold = feedback_threshold or self.feedback_threshold
# the line below also updates data if needed and triggers notifier
idx, val, shp = self.data.to_coo(tensor_mode=False,
feedback_threshold=threshold)
dtype = dtype or val.dtype
if ignore_feedback: # for compatibility with non-numeric tensor feedback data
val = np.ones_like(val, dtype=dtype)
matrix = coo_matrix((val, (idx[:, 0], idx[:, 1])),
shape=shp, dtype=dtype)
if sparse_format == 'csr':
return matrix.tocsr()
elif sparse_format == 'csc':
return matrix.tocsc()
elif sparse_format == 'coo':
matrix.sum_duplicates()
return matrix
def get_test_matrix(self, test_data=None, shape=None, user_slice=None, dtype=None, ignore_feedback=False):
if test_data is None:
test_data, shape, _ = self._get_test_data()
elif shape is None:
raise ValueError('Shape of test data must be provided')
num_users_all = shape[0]
if user_slice:
start, stop = user_slice
stop = min(stop, num_users_all)
num_users = stop - start
coo_data = self._slice_test_data(test_data, start, stop)
else:
num_users = num_users_all
coo_data = test_data
user_coo, item_coo, fdbk_coo = coo_data
valid_fdbk = fdbk_coo != 0
if not valid_fdbk.all():
user_coo = user_coo[valid_fdbk]
item_coo = item_coo[valid_fdbk]
fdbk_coo = fdbk_coo[valid_fdbk]
dtype = dtype or fdbk_coo.dtype
if ignore_feedback: # for compatibility with non-numeric tensor feedback data
fdbk_coo = np.ones_like(fdbk_coo, dtype=dtype)
num_items = shape[1]
test_matrix = csr_matrix((fdbk_coo, (user_coo, item_coo)),
shape=(num_users, num_items),
dtype=dtype)
return test_matrix, coo_data
def _get_slices_idx(self, shape, result_width=None, scores_multiplier=None, dtypes=None):
result_width = result_width or self.topk
if scores_multiplier is None:
try:
fdbk_dim = self.factors.get(self.data.fields.feedback, None).shape
scores_multiplier = fdbk_dim[1]
except AttributeError:
scores_multiplier = 1
slices_idx = array_split(shape, result_width, scores_multiplier, dtypes=dtypes)
return slices_idx
def _get_test_data(self, feedback_threshold=None):
try:
tensor_mode = self.factors.get(self.data.fields.feedback, None) is not None
except AttributeError:
tensor_mode = False
test_shape = self.data.get_test_shape(tensor_mode=tensor_mode)
threshold = feedback_threshold or self.feedback_threshold
if self.data.warm_start:
if threshold:
print('Specifying threshold has no effect in warm start.')
threshold = None
else:
if self.data.test_sample and (threshold is not None):
print('Specifying both threshold value and test_sample may change test data.')
user_idx, item_idx, feedback = self.data.test_to_coo(tensor_mode=tensor_mode, feedback_threshold=threshold)
idx_diff = np.diff(user_idx)
# TODO sorting by self._prediction_key
assert (idx_diff >= 0).all() # calculations assume testset is sorted by users!
# TODO only required when testset consists of known users
if (idx_diff > 1).any() or (user_idx.min() != 0): # check index monotonicity
test_users = user_idx[np.r_[0, np.where(idx_diff)[0]+1]]
user_idx = np.r_[0, np.cumsum(idx_diff > 0)].astype(user_idx.dtype)
else:
test_users = np.arange(test_shape[0])
test_data = (user_idx, item_idx, feedback)
return test_data, test_shape, test_users
@staticmethod
def _slice_test_data(test_data, start, stop):
user_coo, item_coo, fdbk_coo = test_data
slicer = (user_coo >= start) & (user_coo < stop)
# always slice over users only
user_slice_coo = user_coo[slicer] - start
item_slice_coo = item_coo[slicer]
fdbk_slice_coo = fdbk_coo[slicer]
return (user_slice_coo, item_slice_coo, fdbk_slice_coo)
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
raise NotImplementedError('This must be implemented in subclasses')
def _user_scores(self, i):
# should not be exposed, designed for use within framework
# operates on internal itemid's
if not self._is_ready:
if self.verbose:
print('{} model is not ready. Rebuilding.'.format(self.method))
self.build()
test_data, test_shape, test_users = self._get_test_data()
if not self.data.warm_start:
i, = np.where(test_users == i)[0]
scores, seen_idx = self.slice_recommendations(test_data, test_shape, i, i+1)
if self.filter_seen:
self.downvote_seen_items(scores, seen_idx)
return scores, seen_idx
def _make_user(self, user_info):
# converts external user info into internal representation
userid, itemid, feedback = self.data.fields
if isinstance(user_info, dict): # item:feedback dictionary
items_data, feedback_data = zip(*user_info.items())
elif isinstance(user_info, (list, tuple, set, np.ndarray)): # list of items
items_data = user_info
feedback_data = {}
if feedback is not None:
feedback_val = self.data.training[feedback].max()
feedback_data = {feedback: [feedback_val]*len(items_data)}
else:
raise ValueError("Unrecognized input for `user_info`.")
try:
item_index = self.data.index.itemid.training
except AttributeError:
item_index = self.data.index.itemid
# need to convert itemid's to internal representation
# conversion is not required for feedback (it's made in *to_coo functions, if needed)
items_data = item_index.set_index('old').loc[items_data, 'new'].values
user_data = {userid: [0]*len(items_data), itemid: items_data}
user_data.update(feedback_data)
return pd.DataFrame(user_data)
def show_recommendations(self, user_info, topk=None):
# convenience function to model users and get recs
# operates on external itemid's
if isinstance(user_info, int):
scores, seen_idx = self._user_scores(user_info)
else:
testset = self.data.test.testset
holdout = self.data.test.holdout
user_data = self._make_user(user_info)
try:
# makes a "fake" test user
self.data._test = namedtuple('TestData', 'testset holdout')._make([user_data, None])
scores, seen_idx = self._user_scores(0)
finally:
# restore original data - prevent information loss
self.data._test = namedtuple('TestData', 'testset holdout')._make([testset, holdout])
_topk = self.topk
if topk is not None:
self.topk = topk
try:
# takes care of both sparse and dense recommendation lists
top_recs = self.get_topk_elements(scores).squeeze() # remove singleton
finally:
self.topk = _topk
seen_idx = seen_idx[1] # only items idx
# convert back to external representation
item_index = self.data.get_entity_index(self.data.fields.itemid)
item_idx_map = item_index.set_index('new')
top_recs = item_idx_map.loc[top_recs, 'old'].values
seen_items = item_idx_map.loc[seen_idx, 'old'].values
return top_recs, seen_items
def _slice_recommender(self, user_slice, test_data, test_shape, test_users):
start, stop = user_slice
scores, slice_data = self.slice_recommendations(test_data, test_shape, start, stop, test_users)
if self.filter_seen:
# prevent seen items from appearing in recommendations
# NOTE: in case of sparse models (e.g. simple item-to-item)
# there's a risk of having seen items in recommendations list
# (for topk < i2i_matrix.shape[1]-len(unseen))
# this is related to low generalization ability
# of the naive cooccurrence method itself, not to the algorithm
self.downvote_seen_items(scores, slice_data)
top_recs = self.get_topk_elements(scores)
return top_recs
def run_parallel_recommender(self, result, user_slices, *args):
with ThreadPoolExecutor(max_workers=self.max_test_workers) as executor:
recs_futures = {executor.submit(self._slice_recommender,
user_slice, *args): user_slice
for user_slice in user_slices}
for future in as_completed(recs_futures):
start, stop = recs_futures[future]
result[start:stop, :] = future.result()
def run_sequential_recommender(self, result, user_slices, *args):
for user_slice in user_slices:
start, stop = user_slice
result[start:stop, :] = self._slice_recommender(user_slice, *args)
def get_recommendations(self):
if self.verify_integrity:
self.verify_data_integrity()
test_data = self._get_test_data()
test_shape = test_data[1]
user_slices_idx = self._get_slices_idx(test_shape)
user_slices = zip(user_slices_idx[:-1], user_slices_idx[1:])
top_recs = np.empty((test_shape[0], self.topk), dtype=np.int64)
if self.max_test_workers and len(user_slices_idx) > 2:
self.run_parallel_recommender(top_recs, user_slices, *test_data)
else:
self.run_sequential_recommender(top_recs, user_slices, *test_data)
return top_recs
def evaluate(self, metric_type='all', topk=None, not_rated_penalty=None,
switch_positive=None, ignore_feedback=False, simple_rates=False,
on_feedback_level=None):
if metric_type == 'all':
metric_type = ['hits', 'relevance', 'ranking', 'experience']
if metric_type == 'main':
metric_type = ['relevance', 'ranking']
if not isinstance(metric_type, (list, tuple)):
metric_type = [metric_type]
if int(topk or 0) > self.topk:
self.topk = topk # will also flush old recommendations
# support rolling back scenario for @k calculations
recommendations = self.recommendations[:, :topk] # will recalculate if empty
switch_positive = switch_positive or self.switch_positive
feedback = self.data.fields.feedback
holdout = self.data.test.holdout
if (switch_positive is None) or (feedback is None):
# all recommendations are considered positive predictions
# this is a proper setting for binary data problems (implicit feedback)
# in this case all unrated items recommended by an algorithm are
# assumed to be "honest" false positives and therefore the penalty equals 1
not_rated_penalty = 1 if not_rated_penalty is None else not_rated_penalty
is_positive = None
else:
# if data is not binary (explicit feedback), the intuition is different
# it becomes unclear whether unrated items are "honest" false positives
# as these items may include both top-rated and down-rated ones
# the default setting in this case is to ignore such items entirely
# by setting the penalty to 0; this is adjustable, however
not_rated_penalty = not_rated_penalty or 0
is_positive = (holdout[feedback] >= switch_positive).values
feedback = None if ignore_feedback else feedback
scoring_data = assemble_scoring_matrices(recommendations, holdout,
self._prediction_key, self._prediction_target,
is_positive, feedback=feedback)
scores = []
if 'relevance' in metric_type: # no need for feedback
if (self.data.holdout_size == 1) or simple_rates:
scores.append(get_hr_score(scoring_data[1]))
else:
scores.append(get_relevance_scores(*scoring_data, not_rated_penalty=not_rated_penalty))
if 'ranking' in metric_type:
if (self.data.holdout_size == 1) or simple_rates:
scores.append(get_rr_scores(scoring_data[1]))
else:
ndcg_alternative = get_default('ndcg_alternative')
topk = recommendations.shape[1] # handle topk=None case
# topk has to be passed explicitly, otherwise it's unclear how to
# estimate ideal ranking for NDCG and NDCL metrics in get_ndcr_discounts
# it's also used in MAP calculation
scores.append(get_ranking_scores(*scoring_data, topk=topk, switch_positive=switch_positive, alternative=ndcg_alternative))
if 'experience' in metric_type: # no need for feedback
fields = self.data.fields
# support custom scenarios, e.g. coldstart
entity_type = fields._fields[fields.index(self._prediction_target)]
entity_index = getattr(self.data.index, entity_type)
try:
n_entities = entity_index.shape[0]
except AttributeError:
n_entities = entity_index.training.shape[0]
scores.append(get_experience_scores(recommendations, n_entities))
if 'hits' in metric_type: # no need for feedback
scores.append(get_hits(*scoring_data, not_rated_penalty=not_rated_penalty))
if not scores:
raise NotImplementedError
if len(scores) == 1:
scores = scores[0]
return scores
@staticmethod
def topsort(a, topk):
parted = np.argpartition(a, -topk)[-topk:]
return parted[np.argsort(-a[parted])]
@staticmethod
def downvote_seen_items(recs, idx_seen):
# NOTE for sparse scores matrix this method can lead to a slightly worse
# results (comparing to the same method but with "densified" scores matrix)
# models with sparse scores can alleviate that by extending recommendations
# list with most popular items or items generated by a more sophisticated logic
idx_seen = idx_seen[:2] # need only users and items
if sp.sparse.issparse(recs):
ind_data = np.ones(len(idx_seen[0]), dtype=bool) # indicator (np.bool is a removed numpy alias)
seen = coo_matrix((ind_data, idx_seen), shape=recs.shape, copy=False)
seen_recs = recs.multiply(seen)
# In the sparse case it's impossible to downvote seen items scores
# without making matrix dense. Have to simply make them 0.
recs -= seen_recs
# This, however, differs from the dense case results as seen
# items may appear earlier in the top-k list due to randomization
else:
try:
idx_seen_flat = np.ravel_multi_index(idx_seen, recs.shape)
except ValueError:
# make compatible for single user recommendations
idx_seen_flat = idx_seen
seen_data = recs.flat[idx_seen_flat]
# move seen items scores below minimum value
lowered = recs.min() - (seen_data.max() - seen_data) - 1
recs.flat[idx_seen_flat] = lowered
def get_topk_elements(self, scores):
topk = self.topk
if sp.sparse.issparse(scores):
assert scores.format == 'csr'
# there can be fewer than topk values in some rows
# need to extend sorted scores to conform with evaluation matrix shape
# can do this by adding -1's to the right, however:
# this relies on the fact that there are no -1's in evaluation matrix
# NOTE need to ensure that this is always true
def topscore(x, k):
data = x.data.values
cols = x.cols.values
nnz = len(data)
if k >= nnz:
cols_sorted = cols[np.argsort(-data)]
# need to pad values to conform with evaluation matrix shape
res = np.pad(cols_sorted, (0, k-nnz),
'constant', constant_values=self._pad_const)
else:
# TODO verify that argpartition doesn't add too much overhead
# even when k is relatively small
res = cols[self.topsort(data, k)]
return res
idx = scores.nonzero()
row_data = pd.DataFrame({'data': scores.data, 'cols': idx[1]}).groupby(idx[0], sort=True)
nnz_users = row_data.grouper.levels[0]
num_users = scores.shape[0]
if len(nnz_users) < num_users:
# scores may have zero-valued rows, this breaks get_topk_elements
# as scores.nonzero() will filter out indices of those rows.
# Need to restore full data with zeros in that case.
recs = np.empty((num_users, topk), dtype=idx[1].dtype)
zero_rows = np.in1d(np.arange(num_users), nnz_users, assume_unique=True, invert=True)
recs[zero_rows, :] = self._pad_const
recs[~zero_rows, :] = np.stack(row_data.apply(topscore, topk).tolist())
else:
recs = np.stack(row_data.apply(topscore, topk).tolist())
else:
# apply_along_axis is more memory efficient than argsort on the full array
recs = np.apply_along_axis(self.topsort, 1, scores, topk)
return recs
@staticmethod
def orthogonalize(u, v, complete=False):
Qu, Ru = np.linalg.qr(u)
Qv, Rv = np.linalg.qr(v)
if complete:
# it's not needed for folding-in, as Ur and Vr will cancel out anyway
Ur, Sr, Vr = np.linalg.svd(Ru.dot(Rv.T))
U = Qu.dot(Ur)
V = Qv.dot(Vr.T)
else:
U, V = Qu, Qv
return U, V
def verify_data_integrity(self):
data = self.data
userid, itemid, feedback = data.fields
try:
item_index = data.index.itemid.training
except AttributeError:
item_index = data.index.itemid
nunique_items = data.training[itemid].nunique()
assert nunique_items == item_index.shape[0]
assert nunique_items == data.training[itemid].max() + 1
testset = data.test.testset
if testset is not None:
nunique_test_users = testset[userid].nunique()
if data._state == 4:
assert nunique_test_users == testset[userid].max() + 1
try:
assert self.factors.get(itemid, None).shape[0] == item_index.shape[0]
assert self.factors.get(feedback, None).shape[0] == data.index.feedback.shape[0]
except AttributeError:
pass
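# --- Editor's illustration (not part of the original module) ---
# The two-step top-k selection used by RecommenderModel.topsort above:
# argpartition isolates the k largest entries in O(n), then only those
# k entries are fully sorted.
import numpy as np

_a = np.array([3.0, 9.0, 1.0, 7.0])
_k = 2
_parted = np.argpartition(_a, -_k)[-_k:]     # top-k indices, unordered
_top = _parted[np.argsort(-_a[_parted])]     # ordered best-first
assert _top.tolist() == [1, 3]               # 9.0 first, then 7.0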
class NonPersonalized(RecommenderModel):
def __init__(self, kind, *args, **kwargs):
deprecation_msg = '''This is a deprecated method.
Use either PopularityModel or RandomModel instead.'''
warnings.warn(deprecation_msg, DeprecationWarning)
super(NonPersonalized, self).__init__(*args, **kwargs)
self.method = kind
def build(self):
pass
def get_recommendations(self):
userid, itemid, feedback = self.data.fields
test_data = self.data.test.testset
test_idx = (test_data[userid].values.astype(np.int64),
test_data[itemid].values.astype(np.int64))
num_users = self.data.test.testset[userid].max() + 1
if self.method == 'mostpopular':
items_scores = self.data.training.groupby(itemid, sort=True).size().values
# scores = np.lib.stride_tricks.as_strided(items_scores, (num_users, items_scores.size), (0, items_scores.itemsize))
scores = np.repeat(items_scores[None, :], num_users, axis=0)
elif self.method == 'random':
num_items = self.data.training[itemid].max() + 1
scores = np.random.random((num_users, num_items))
elif self.method == 'topscore':
items_scores = self.data.training.groupby(itemid, sort=True)[feedback].sum().values
scores = np.repeat(items_scores[None, :], num_users, axis=0)
else:
raise NotImplementedError
if self.filter_seen:
# prevent seen items from appearing in recommendations
self.downvote_seen_items(scores, test_idx)
top_recs = self.get_topk_elements(scores)
return top_recs
class PopularityModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(PopularityModel, self).__init__(*args, **kwargs)
self.method = 'MP'
self.by_feedback_value = False
def build(self):
itemid = self.data.fields.itemid
item_groups = self.data.training.groupby(itemid, sort=True)
if self.by_feedback_value:
feedback = self.data.fields.feedback
self.item_scores = item_groups[feedback].sum().values
else:
self.item_scores = item_groups.size().values
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
slice_data = self._slice_test_data(test_data, start, stop)
n_users = stop - start
scores = np.repeat(self.item_scores[None, :], n_users, axis=0)
return scores, slice_data
class RandomModel(RecommenderModel):
def __init__(self, *args, **kwargs):
self.seed = kwargs.pop('seed', None)
super(RandomModel, self).__init__(*args, **kwargs)
self.method = 'RND'
def build(self):
try:
index_data = self.data.index.itemid.training
except AttributeError:
index_data = self.data.index.itemid
self.n_items = index_data.shape[0]
seed = self.seed
self._random_state = np.random.RandomState(seed) if seed is not None else np.random
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
slice_data = self._slice_test_data(test_data, start, stop)
n_users = stop - start
scores = self._random_state.rand(n_users, self.n_items)
return scores, slice_data
class CooccurrenceModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(CooccurrenceModel, self).__init__(*args, **kwargs)
self.method = 'item-to-item' # pick some meaningful name
self.implicit = False
self.dense_output = False
def build(self):
user_item_matrix = self.get_training_matrix()
if self.implicit:
# np.sign allows for negative values as well
user_item_matrix.data = np.sign(user_item_matrix.data)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
i2i_matrix = user_item_matrix.T.dot(user_item_matrix) # gives CSC format
i2i_matrix.setdiag(0) # exclude "self-links"
i2i_matrix.eliminate_zeros()
self._i2i_matrix = i2i_matrix
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
test_matrix, slice_data = self.get_test_matrix(test_data, shape, (start, stop))
# NOTE CSR format is mandatory for proper handling of single user
# recommendations, as a vector of shape (1, N) in CSC format is inefficient
if self.implicit:
test_matrix.data = np.sign(test_matrix.data)
scores = sparse_dot(test_matrix, self._i2i_matrix, self.dense_output, True)
return scores, slice_data
class ProbabilisticMF(RecommenderModel):
def __init__(self, *args, **kwargs):
self.seed = kwargs.pop('seed', None)
super().__init__(*args, **kwargs)
self.method = 'PMF'
self.optimizer = simple_pmf_sgd
self.learn_rate = 0.005
self.sigma = 1
self.num_epochs = 25
self.rank = 10
self.tolerance = 1e-4
self.factors = {}
self.rmse_history = None
self.show_rmse = False
self.iterations_time = None
def build(self, *args, **kwargs):
matrix = self.get_training_matrix(sparse_format='coo', dtype='f8')
user_idx, item_idx = matrix.nonzero()
interactions = (user_idx, item_idx, matrix.data)
nonzero_count = (matrix.getnnz(axis=1), matrix.getnnz(axis=0))
rank = self.rank
lrate = self.learn_rate
sigma = self.sigma
num_epochs = self.num_epochs
tol = self.tolerance
self.rmse_history = []
self.iterations_time = []
general_config = dict(seed=self.seed,
verbose=self.show_rmse,
iter_errors=self.rmse_history,
iter_time=self.iterations_time)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
P, Q = self.optimizer(interactions, matrix.shape, nonzero_count, rank,
lrate, sigma, num_epochs, tol,
*args,
**kwargs,
**general_config)
self.factors[self.data.fields.userid] = P
self.factors[self.data.fields.itemid] = Q
def get_recommendations(self):
if self.data.warm_start:
raise NotImplementedError
else:
return super().get_recommendations()
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
slice_data = self._slice_test_data(test_data, start, stop)
user_factors = self.factors[userid][test_users[start:stop], :]
item_factors = self.factors[itemid]
scores = user_factors.dot(item_factors.T)
return scores, slice_data
class EmbeddingsMixin:
@property
def user_embeddings(self):
return self.factors[self.data.fields.userid]
@property
def item_embeddings(self):
return self.factors[self.data.fields.itemid]
class SVDModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(SVDModel, self).__init__(*args, **kwargs)
self._rank = defaults.svd_rank
self.method = 'PureSVD'
self.factors = {}
@property
def rank(self):
return self._rank
@rank.setter
def rank(self, new_value):
if new_value != self._rank:
self._rank = new_value
self._check_reduced_rank(new_value)
self._recommendations = None
def _check_reduced_rank(self, rank):
for entity, factor in self.factors.items():
if factor is None:
continue
if factor.shape[-1] < rank:
self._is_ready = False
self.factors = dict.fromkeys(self.factors.keys())
break
else:
# avoid accidental overwrites if factors backup exists
self.factors = dict(**self.factors)
# the ellipsis allows handling a 1d array of singular values
self.factors[entity] = factor[..., :rank]
def build(self, operator=None, return_factors='vh'):
if operator is not None:
svd_matrix = operator
else:
svd_matrix = self.get_training_matrix(dtype=np.float64)
svd_params = dict(k=self.rank, return_singular_vectors=return_factors)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
user_factors, sigma, item_factors = svds(svd_matrix, **svd_params)
if user_factors is not None:
user_factors = np.ascontiguousarray(user_factors[:, ::-1])
if item_factors is not None:
item_factors = np.ascontiguousarray(item_factors[::-1, :]).T
if sigma is not None:
sigma = np.ascontiguousarray(sigma[::-1])
self.factors[self.data.fields.userid] = user_factors
self.factors[self.data.fields.itemid] = item_factors
self.factors['singular_values'] = sigma
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
test_matrix, slice_data = self.get_test_matrix(test_data, shape, (start, stop))
v = self.factors[self.data.fields.itemid]
scores = (test_matrix.dot(v)).dot(v.T)
return scores, slice_data
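# --- Editor's illustration (not part of the original module) ---
# The PureSVD scoring rule in SVDModel.slice_recommendations above: test
# profiles are folded in by projecting onto the item factors V and
# reconstructing, scores = (X @ V) @ V.T. A toy-sized sketch.
import numpy as np

_rng = np.random.default_rng(0)
_X = _rng.random((4, 6))                                # test user-item matrix
_V = np.linalg.svd(_X, full_matrices=False)[2][:2].T    # rank-2 item factors
_scores = _X.dot(_V).dot(_V.T)
assert _scores.shape == _X.shape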
class ScaledMatrixMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._col_scaling = 0.4
self._row_scaling = 1
self.method = f'{self.method}-s'
@property
def col_scaling(self):
return self._col_scaling
@property
def row_scaling(self):
return self._row_scaling
@col_scaling.setter
def col_scaling(self, new_value):
if new_value != self._col_scaling:
self._col_scaling = new_value
self._recommendations = None
@row_scaling.setter
def row_scaling(self, new_value):
if new_value != self._row_scaling:
self._row_scaling = new_value
self._recommendations = None
def get_training_matrix(self, *args, **kwargs):
scaled_matrix = super().get_training_matrix(*args, **kwargs)
scaled_matrix = rescale_matrix(scaled_matrix, self.row_scaling, 1)
scaled_matrix = rescale_matrix(scaled_matrix, self.col_scaling, 0)
return scaled_matrix
class ScaledSVD(ScaledMatrixMixin, SVDModel): pass
class CoffeeModel(RecommenderModel):
def __init__(self, *args, **kwargs):
super(CoffeeModel, self).__init__(*args, **kwargs)
self._mlrank = defaults.mlrank
self.factors = {}
self.chunk = defaults.test_chunk_size
self.method = 'CoFFee'
self._flattener = defaults.flattener
self.growth_tol = defaults.growth_tol
self.num_iters = defaults.num_iters
self.show_output = defaults.show_output
self.seed = None
self._vectorize_target = defaults.test_vectorize_target
self.parallel_ttm = defaults.parallel_ttm
@property
def mlrank(self):
return self._mlrank
@mlrank.setter
def mlrank(self, new_value):
if new_value != self._mlrank:
self._mlrank = new_value
self._check_reduced_rank(new_value)
self._recommendations = None
@property
def flattener(self):
return self._flattener
@flattener.setter
def flattener(self, new_value):
old_value = self._flattener
if new_value != old_value:
self._flattener = new_value
self._recommendations = None
@property
def tensor_outer_at(self):
vtarget = self._vectorize_target.lower()
if self.max_test_workers and (vtarget == 'parallel'):
# force single thread for tensor_outer_at to safely run in parallel
vtarget = 'cpu'
return tensor_outer_at(vtarget)
def _check_reduced_rank(self, mlrank):
for mode, entity in enumerate(self.data.fields):
factor = self.factors.get(entity, None)
if factor is None:
continue
rank = mlrank[mode]
if factor.shape[1] < rank:
self._is_ready = False
self.factors = {}
break
elif factor.shape[1] == rank:
continue
else:
# avoid accidental overwrites if factors backup exists
self.factors = dict(**self.factors)
rfactor, new_core = self.round_core(self.factors['core'], mode, rank)
self.factors[entity] = factor.dot(rfactor)
self.factors['core'] = new_core
@staticmethod
def round_core(core, mode, rank):
new_dims = [mode] + [m for m in range(core.ndim) if m!=mode]
mode_dim = core.shape[mode]
flat_core = core.transpose(new_dims).reshape((mode_dim, -1), order='F')
u, s, vt = np.linalg.svd(flat_core, full_matrices=False)
rfactor = u[:, :rank]
new_core = (np.ascontiguousarray(s[:rank, np.newaxis]*vt[:rank, :])
.reshape(rank, *[core.shape[i] for i in new_dims[1:]], order='F')
.transpose(inverse_permutation(np.array(new_dims))))
return rfactor, new_core
@staticmethod
def flatten_scores(tensor_scores, flattener=None):
flattener = flattener or slice(None)
if isinstance(flattener, str):
slicer = slice(None)
flatten = getattr(np, flattener)
matrix_scores = flatten(tensor_scores[..., slicer], axis=-1)
elif isinstance(flattener, int):
slicer = flattener
matrix_scores = tensor_scores[..., slicer]
elif isinstance(flattener, (list, slice)):
slicer = flattener
flatten = np.sum
matrix_scores = flatten(tensor_scores[..., slicer], axis=-1)
elif isinstance(flattener, tuple):
slicer, flatten_method = flattener
slicer = slicer or slice(None)
flatten = getattr(np, flatten_method)
matrix_scores = flatten(tensor_scores[..., slicer], axis=-1)
elif callable(flattener):
matrix_scores = flattener(tensor_scores)
else:
raise ValueError('Unrecognized value for flattener attribute')
return matrix_scores
def build(self):
idx, val, shp = self.data.to_coo(tensor_mode=True)
with track_time(self.training_time, verbose=self.verbose, model=self.method):
(users_factors, items_factors,
feedback_factors, core) = hooi(idx, val, shp, self.mlrank,
growth_tol=self.growth_tol,
num_iters=self.num_iters,
verbose=self.show_output,
parallel_ttm=self.parallel_ttm,
seed=self.seed)
self.factors[self.data.fields.userid] = users_factors
self.factors[self.data.fields.itemid] = items_factors
self.factors[self.data.fields.feedback] = feedback_factors
self.factors['core'] = core
def unfold_test_tensor_slice(self, test_data, shape, start, stop, mode):
slice_idx = self._slice_test_data(test_data, start, stop)
num_users = stop - start
num_items = shape[1]
num_fdbks = shape[2]
slice_shp = (num_users, num_items, num_fdbks)
idx, shp = unfold_tensor_coordinates(slice_idx, slice_shp, mode)
val = np.ones_like(slice_idx[2], dtype=np.uint8)
test_tensor_unfolded = csr_matrix((val, idx), shape=shp, dtype=val.dtype)
return test_tensor_unfolded, slice_idx
def slice_recommendations(self, test_data, shape, start, stop, test_users=None):
slice_idx = self._slice_test_data(test_data, start, stop)
v = self.factors[self.data.fields.itemid]
w = self.factors[self.data.fields.feedback]
# use the fact that test data is sorted by users for reduction:
scores = self.tensor_outer_at(1.0, v, w, slice_idx[1], slice_idx[2])
scores = np.add.reduceat(scores, np.r_[0, np.where(np.diff(slice_idx[0]))[0]+1])
wt_flat = self.flatten_scores(w.T, self.flattener) # TODO cache result
scores = np.tensordot(scores, wt_flat, axes=(2, 0)).dot(v.T)
return scores, slice_idx
def get_holdout_slice(self, start, stop):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
holdout = self.data.test.holdout
user_sel = (holdout[userid] >= start) & (holdout[userid] < stop)
holdout_users = holdout.loc[user_sel, userid].values.astype(np.int64) - start
holdout_items = holdout.loc[user_sel, itemid].values.astype(np.int64)
return (holdout_users, holdout_items)
# additional functionality: rating prediction
def predict_feedback(self):
if self.data.warm_start:
raise NotImplementedError
userid = self.data.fields.userid
itemid = self.data.fields.itemid
feedback = self.data.fields.feedback
holdout = self.data.test.holdout
holdout_users = holdout[userid].values.astype(np.int64)
holdout_items = holdout[itemid].values.astype(np.int64)
u = self.factors[userid]
v = self.factors[itemid]
w = self.factors[feedback]
g = self.factors['core']
gv = np.tensordot(g, v[holdout_items, :], (1, 1))
gu = (gv * u[holdout_users, None, :].T).sum(axis=0)
scores = w.dot(gu).T
predictions = np.argmax(scores, axis=-1)
feedback_idx = self.data.index.feedback.set_index('new')
predicted_feedback = feedback_idx.loc[predictions, 'old'].values
return predicted_feedback
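# --- Editor's illustration (not part of the original module) ---
# The string form of `flattener` consumed by CoffeeModel.flatten_scores
# above resolves to a NumPy reduction applied over the trailing feedback
# axis, turning user x item x feedback scores into a user x item matrix.
import numpy as np

_tensor_scores = np.arange(24, dtype=float).reshape(2, 3, 4)
_flatten = getattr(np, 'sum')                 # e.g. flattener='sum'
_matrix_scores = _flatten(_tensor_scores[..., slice(None)], axis=-1)
assert _matrix_scores.shape == (2, 3)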
class RandomSampleEvaluationSVDMixin():
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# as defined in RandomSampleEvaluationMixin in data models
prefix = self.data._holdout_item_prefix
self._prediction_target = f'{prefix}_{self.data.fields.itemid}'
def compute_holdout_scores(self, user_factors, item_factors):
holdout = self.data.test.holdout
userid = self.data.fields.userid
itemid = self.data.fields.itemid
holdout_size = self.data.holdout_size
assert holdout_size >= 1 # only fixed number of holdout items is supported
# "rebase" user index (see comments in `get_recommmendations`)
useridx = pd.factorize(
holdout[userid], sort=False # already sorted via data moodel
)[0].reshape(-1, holdout_size)
itemidx = holdout[itemid].values.reshape(-1, holdout_size)
inner_product = inner_product_at(target='parallel')
# for general matrix factorization the user must take care of submitting
# user_factors of the correct shape, otherwise, if holdout contains
# only a subset of all users, the answer will be incorrect
return inner_product(user_factors, item_factors, useridx, itemidx)
def compute_random_item_scores(self, user_factors, item_factors):
holdout = self.data.test.holdout
userid = self.data.fields.userid
test_users = holdout[userid].drop_duplicates().values # preserve sorted
test_items = self.data.unseen_interactions.loc[test_users].values
# "rebase" user index (see comments in `get_recommmendations`)
n_users = len(test_users)
n_items = self.data.unseen_items_num
useridx = np.broadcast_to(np.arange(n_users)[:, None], (n_users, n_items))
itemidx = np.concatenate(test_items).reshape(n_users, n_items)
# perform vectorized scalar products at bulk
inner_product = inner_product_at(target='parallel')
# for general matrix factorization the user must take care of submitting
# user_factors of the correct shape, otherwise, if holdout contains
# only a subset of all users, the answer will be incorrect
return inner_product(user_factors, item_factors, useridx, itemidx)
def compute_random_item_scores_gen(self, user_factors, item_factors,
profile_matrix, n_unseen):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
holdout = self.data.test.holdout
n_users = profile_matrix.shape[0]
seed = self.data.seed
holdout_matrix = matrix_from_observations(
holdout, userid, itemid, profile_matrix.shape, feedback=None
)
all_seen = profile_matrix + holdout_matrix # only need nnz indices
scores = np.zeros((n_users, n_unseen))
seedseq = np.random.SeedSequence(seed).generate_state(n_users)
mf_random_item_scoring(
user_factors, item_factors, all_seen.indptr, all_seen.indices,
n_unseen, seedseq, scores
)
return scores
def get_recommendations(self):
itemid = self.data.fields.itemid
if self._prediction_target == itemid:
return super().get_recommendations()
item_factors = self.factors[itemid]
test_matrix, _ = self.get_test_matrix()
user_factors = test_matrix.dot(item_factors)
# from now on will need to work with "rebased" user indices
# to properly index contiguous user matrices
holdout_scores = self.compute_holdout_scores(user_factors, item_factors)
if self.data.unseen_interactions is None:
n_unseen = self.data.unseen_items_num
if n_unseen is None:
raise ValueError('Number of items to sample is unspecified.')
unseen_scores = self.compute_random_item_scores_gen(
user_factors, item_factors, test_matrix, n_unseen
)
else:
unseen_scores = self.compute_random_item_scores(
user_factors, item_factors
)
# combine all scores and rank selected items
scores = np.concatenate((holdout_scores, unseen_scores), axis=1)
return np.apply_along_axis(self.topsort, 1, scores, self.topk)
| mit |
nmartensen/pandas | pandas/core/indexes/timedeltas.py | 1 | 34001 | """ implement the TimedeltaIndex """
from datetime import timedelta
import numpy as np
from pandas.core.dtypes.common import (
_TD_DTYPE,
is_integer, is_float,
is_bool_dtype,
is_list_like,
is_scalar,
is_integer_dtype,
is_object_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
_ensure_int64)
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.common import _maybe_box, _values_from_object
from pandas.core.indexes.base import Index
from pandas.core.indexes.numeric import Int64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import to_offset
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
from pandas.util._decorators import Appender, Substitution, deprecate_kwarg
from pandas.core.indexes.datetimelike import TimelikeOps, DatetimeIndexOpsMixin
from pandas.core.tools.timedeltas import (
to_timedelta, _coerce_scalar_to_timedelta_type)
from pandas.tseries.offsets import Tick, DateOffset
from pandas._libs import (lib, index as libindex, tslib as libts,
join as libjoin, Timedelta, NaT, iNaT)
def _td_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert timedelta-like to timedelta64
"""
def wrapper(self, other):
msg = "cannot compare a TimedeltaIndex with type {0}"
func = getattr(super(TimedeltaIndex, self), opname)
if _is_convertible_to_td(other) or other is NaT:
try:
other = _to_m8(other)
except ValueError:
# failed to parse as timedelta
raise TypeError(msg.format(type(other)))
result = func(other)
if isna(other):
result.fill(nat_result)
else:
if not is_list_like(other):
raise TypeError(msg.format(type(other)))
other = TimedeltaIndex(other).values
result = func(other)
result = _values_from_object(result)
if isinstance(other, Index):
o_mask = other.values.view('i8') == iNaT
else:
o_mask = other.view('i8') == iNaT
if o_mask.any():
result[o_mask] = nat_result
if self.hasnans:
result[self._isnan] = nat_result
# support of bool dtype indexers
if is_bool_dtype(result):
return result
return Index(result)
return wrapper
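# A minimal usage sketch of the comparison wrappers below (illustrative):
#   >>> idx = TimedeltaIndex(['1 days', NaT, '3 days'])
#   >>> idx == '1 days'
#   array([ True, False, False], dtype=bool)
# NaT slots take nat_result: False for __eq__, True for __ne__.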
class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit : string, optional
Unit of the data (D, h, m, s, ms, us, ns), used when `data` holds
integer/float values
freq : string or DateOffset, optional
Frequency for the index
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is None, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(
joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
# define my properties & methods for delegation
_other_ops = []
_bool_ops = []
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["to_pytimedelta", "total_seconds",
"round", "floor", "ceil"]
__eq__ = _td_index_cmp('__eq__')
__ne__ = _td_index_cmp('__ne__', nat_result=True)
__lt__ = _td_index_cmp('__lt__')
__gt__ = _td_index_cmp('__gt__')
__le__ = _td_index_cmp('__le__')
__ge__ = _td_index_cmp('__ge__')
_engine_type = libindex.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
freq = None
def __new__(cls, data=None, unit=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None,
closed=None, verify_integrity=True, **kwargs):
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if data is None and freq is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if not isinstance(data, (np.ndarray, Index, ABCSeries)):
if is_scalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# convert if not already
if getattr(data, 'dtype', None) != _TD_DTYPE:
data = to_timedelta(data, unit=unit, box=False)
elif copy:
data = np.array(data, copy=True)
# check that we are matching freqs
if verify_integrity and len(data) > 0:
if freq is not None and not freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred != freq.freqstr:
on_freq = cls._generate(
index[0], None, len(index), name, freq)
if not np.array_equal(index.asi8, on_freq.asi8):
raise ValueError('Inferred frequency {0} from passed '
'timedeltas does not conform to '
'passed frequency {1}'
.format(inferred, freq.freqstr))
index.freq = freq
return index
if freq_infer:
index = cls._simple_new(data, name=name)
inferred = index.inferred_freq
if inferred:
index.freq = to_offset(inferred)
return index
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate(cls, start, end, periods, name, offset, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and '
'periods, exactly two must be specified')
if start is not None:
start = Timedelta(start)
if end is not None:
end = Timedelta(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
index = _generate_regular_range(start, end, periods, offset)
index = cls._simple_new(index, name=name, freq=offset)
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
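# Trimming sketch for `closed` (assuming a daily offset):
#   closed=None    keeps both endpoints    -> ['1 days', '2 days', '3 days']
#   closed='left'  drops the right endpoint -> ['1 days', '2 days']
#   closed='right' drops the left endpoint  -> ['2 days', '3 days']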
@property
def _box_func(self):
return lambda x: Timedelta(x, unit='ns')
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
values = np.array(values, copy=False)
if values.dtype == np.object_:
values = libts.array_to_timedelta64(values)
if values.dtype != _TD_DTYPE:
values = _ensure_int64(values).view(_TD_DTYPE)
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta, np.timedelta64)):
new_values = self._add_delta_td(delta)
name = self.name
elif isinstance(delta, TimedeltaIndex):
new_values = self._add_delta_tdi(delta)
# update name when delta is index
name = com._maybe_match_name(self, delta)
else:
raise ValueError("cannot add the type {0} to a TimedeltaIndex"
.format(type(delta)))
result = TimedeltaIndex(new_values, freq='infer', name=name)
return result
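# Dispatch sketch: idx + Timedelta('1 day') (or a Day() Tick) goes through
# _add_delta_td and keeps the index name; idx + another TimedeltaIndex goes
# through _add_delta_tdi, and the resulting freq is re-inferred either way.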
def _evaluate_with_timedelta_like(self, other, op, opstr):
# allow division by a timedelta
if opstr in ['__div__', '__truediv__', '__floordiv__']:
if _is_convertible_to_td(other):
other = Timedelta(other)
if isna(other):
raise NotImplementedError(
"division by pd.NaT not implemented")
i8 = self.asi8
if opstr in ['__floordiv__']:
result = i8 // other.value
else:
result = op(i8, float(other.value))
result = self._maybe_mask_results(result, convert='float64')
return Index(result, name=self.name, copy=False)
return NotImplemented
def _add_datelike(self, other):
# adding a timedeltaindex to a datetimelike
from pandas import Timestamp, DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
other = Timestamp(other)
i8 = self.asi8
result = checked_add_with_arr(i8, other.value)
result = self._maybe_mask_results(result, fill_value=iNaT)
return DatetimeIndex(result, name=self.name, copy=False)
def _sub_datelike(self, other):
from pandas import DatetimeIndex
if other is NaT:
result = self._nat_new(box=False)
else:
raise TypeError("cannot subtract a datelike from a TimedeltaIndex")
return DatetimeIndex(result, name=self.name, copy=False)
def _format_native_types(self, na_rep=u('NaT'),
date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
def _get_field(self, m):
values = self.asi8
hasnans = self.hasnans
if hasnans:
result = np.empty(len(self), dtype='float64')
mask = self._isnan
imask = ~mask
result.flat[imask] = np.array(
[getattr(Timedelta(val), m) for val in values[imask]])
result[mask] = np.nan
else:
result = np.array([getattr(Timedelta(val), m)
for val in values], dtype='int64')
return Index(result, name=self.name)
@property
def days(self):
""" Number of days for each element. """
return self._get_field('days')
@property
def seconds(self):
""" Number of seconds (>= 0 and less than 1 day) for each element. """
return self._get_field('seconds')
@property
def microseconds(self):
"""
Number of microseconds (>= 0 and less than 1 second) for each
element. """
return self._get_field('microseconds')
@property
def nanoseconds(self):
"""
Number of nanoseconds (>= 0 and less than 1 microsecond) for each
element.
"""
return self._get_field('nanoseconds')
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
from pandas import DataFrame
columns = ['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds', 'nanoseconds']
hasnans = self.hasnans
if hasnans:
def f(x):
if isna(x):
return [np.nan] * len(columns)
return x.components
else:
def f(x):
return x.components
result = DataFrame([f(x) for x in self])
result.columns = columns
if not hasnans:
result = result.astype('int64')
return result
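# Illustrative output (hypothetical input; one row per element):
#   >>> TimedeltaIndex(['1 days 02:00:00']).components
#      days  hours  minutes  seconds  milliseconds  microseconds  nanoseconds
#   0     1      2        0        0             0             0            0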
def total_seconds(self):
"""
Total duration of each element expressed in seconds.
.. versionadded:: 0.17.0
"""
return Index(self._maybe_mask_results(1e-9 * self.asi8),
name=self.name)
def to_pytimedelta(self):
"""
Return TimedeltaIndex as object ndarray of datetime.timedelta objects
Returns
-------
datetimes : ndarray
"""
return libts.ints_to_pytimedelta(self.asi8)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
return self.asobject
elif is_timedelta64_ns_dtype(dtype):
if copy is True:
return self.copy()
return self
elif is_timedelta64_dtype(dtype):
# return an index (essentially this is division)
result = self.values.astype(dtype, copy=copy)
if self.hasnans:
return Index(self._maybe_mask_results(result,
convert='float64'),
name=self.name)
return Index(result.astype('i8'), name=self.name)
elif is_integer_dtype(dtype):
return Index(self.values.astype('i8', copy=copy), dtype='i8',
name=self.name)
raise ValueError('Cannot cast TimedeltaIndex to dtype %s' % dtype)
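# Casting sketch (values illustrative): .astype('timedelta64[s]') converts
# down to integer seconds and returns a plain Index, while .astype('i8')
# exposes the raw nanoseconds, e.g.
# TimedeltaIndex(['1 days']).astype('i8')[0] == 86400000000000.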
def union(self, other):
"""
Specialized union for TimedeltaIndex objects. If it is possible to
combine overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
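# e.g. with freq='D', ['1 days', '2 days'] and ['3 days', '4 days'] adjoin
# because right_start == left_end + freq, so union can concatenate instead
# of doing a full set operation; overlap is caught by `right_start in left`.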
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return _maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if is_list_like(key):
raise TypeError
if isna(key):
key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance)
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ['ix', 'loc', 'getitem', None]
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + to_offset(parsed.resolution) -
Timedelta(1, 'ns'))
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice', label)
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
raise NotImplementedError
# TODO(wesm): dead code
# parsed = _coerce_scalar_to_timedelta_type(key, box=True)
# is_monotonic = self.is_monotonic
# # figure out the resolution of the passed td
# # and round to it
# # t1 = parsed.round(reso)
# t2 = t1 + to_offset(parsed.resolution) - Timedelta(1, 'ns')
# stamps = self.asi8
# if is_monotonic:
# # we are out of range
# if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
# t2.value < stamps[0]) or
# ((use_rhs and t1.value > stamps[-1] and
# t2.value > stamps[-1])))):
# raise KeyError
# # a monotonic (sorted) series can be sliced
# left = (stamps.searchsorted(t1.value, side='left')
# if use_lhs else None)
# right = (stamps.searchsorted(t2.value, side='right')
# if use_rhs else None)
# return slice(left, right)
# lhs_mask = (stamps >= t1.value) if use_lhs else True
# rhs_mask = (stamps <= t2.value) if use_rhs else True
# # try to find the dates
# return (lhs_mask & rhs_mask).nonzero()[0]
@Substitution(klass='TimedeltaIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_TD_DTYPE, copy=False)
else:
value = _to_m8(value)
return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def dtype(self):
return _TD_DTYPE
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a Python timedelta or a numpy integer-like, returned
Index dtype will be object rather than timedelta64.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except:
pass
freq = None
if isinstance(item, (Timedelta, libts.NaTType)):
# check freq can be preserved on edge cases
if self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.asobject.insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
_ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
"""
return a boolean whether I can attempt conversion to a TimedeltaIndex
"""
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False
def _is_convertible_to_td(key):
return isinstance(key, (DateOffset, timedelta, Timedelta,
np.timedelta64, compat.string_types))
def _to_m8(key):
"""
Timedelta-like => m8[ns]
"""
if not isinstance(key, Timedelta):
# this also converts strings
key = Timedelta(key)
# return a type that can be compared
return np.int64(key.value).view(_TD_DTYPE)
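# e.g. _to_m8('1 days') -> numpy.timedelta64(86400000000000, 'ns'), a raw
# m8[ns] scalar that can be compared directly against the index's values.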
def _generate_regular_range(start, end, periods, offset):
stride = offset.nanos
if periods is None:
b = Timedelta(start).value
e = Timedelta(end).value
e += stride - e % stride
elif start is not None:
b = Timedelta(start).value
e = b + periods * stride
elif end is not None:
e = Timedelta(end).value + stride
b = e - periods * stride
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
data = np.arange(b, e, stride, dtype=np.int64)
data = TimedeltaIndex._simple_new(data, None)
return data
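# Arithmetic sketch (hypothetical 2-nanosecond tick): offset.nanos == 2 with
# start='0ns' and periods=3 gives b=0, e=0 + 3*2 = 6, and
# np.arange(0, 6, 2) -> [0, 2, 4] (values in nanoseconds).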
def timedelta_range(start=None, end=None, periods=None, freq='D',
name=None, closed=None):
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating timedeltas
end : string or timedelta-like, default None
Right bound for generating timedeltas
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
name : string, default None
Name of the resulting TimedeltaIndex
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
"""
return TimedeltaIndex(start=start, end=end, periods=periods,
freq=freq, name=name, closed=closed)
| bsd-3-clause |
numpy/datetime | numpy/core/code_generators/ufunc_docstrings.py | 5 | 83888 | # Docstrings for generated ufuncs
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
res : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added.
Returns
-------
y : {ndarray, scalar}
The sum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return the
angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytical function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above
on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse trigonometric function",
http://en.wikipedia.org/wiki/Inverse_trigonometric_function
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine elementwise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`y`-coordinate in radians ``[-pi/2, pi/2]``. If `x` is a scalar then
a scalar is returned, otherwise an array is returned.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sin(z) = x`. The convention is to return the
angle `z` whose real part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsin` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytical function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from above
on the former and from below on the latter.
The inverse sine is also known as `asin` or ``sin^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse trigonometric function",
http://en.wikipedia.org/wiki/Inverse_trigonometric_function
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then
``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is
in ``[-pi/2, pi/2]``. It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : Calculate the arctan of y/x.
Notes
-----
`arctan` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tan(z) = x`. The convention is to return the
angle `z` whose real part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from the
left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or ``tan^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse trigonometric function",
http://en.wikipedia.org/wiki/Arctan
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be :math:`\\pi/4`:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Elementwise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e. branch) is chosen so that ``arctan2(x1, x2)``
is the signed angle in radians between the line segments
``(0,0) - (1,0)`` and ``(0,0) - (x2,x1)``. This function is defined
also for `x2` = 0.
`arctan2` is not defined for complex-valued arguments.
Parameters
----------
x1 : array_like, real-valued
y-coordinates.
x2 : array_like, real-valued
x-coordinates. `x2` must be broadcastable to match the shape of `x1`,
or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan
Notes
-----
`arctan2` is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C standard [2]:
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers.
References
----------
.. [1] Wikipedia, "atan2",
http://en.wikipedia.org/wiki/Atan2
.. [2] ISO/IEC standard 9899:1999, "Programming language C", 1999.
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `y`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
exp : calculate ``e**x``.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod; the
sign of the result follows the dividend `x1`. For the behavior of the
Python modulo operator ``%``, see `remainder`.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the divisor does not affect the
sign of the result.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
Out : {ndarray, bool}
Output array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=uint8))
array([242], dtype=uint8)
>>> np.binary_repr(x, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(x, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (neither infinity nor Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity; however,
infinity is equivalent to positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity, return result as bool
array.
Parameters
----------
x : array_like
Input values
y : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is positive or negative infinity; otherwise the
values are False. If a second argument is supplied the result is
stored there. If the type of that array is a numeric type the result
is represented as zeros and ones, if the type is boolean then as
False and True. The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if second argument is also supplied with scalar input or
if first and second arguments have different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
out : ndarray of bools
Output array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
less_equal, greater, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
less, greater_equal, greater, equal, not_equal
Examples
--------
>>> np.less_equal([1, 2, 3], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is the logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
XOR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_or, logical_not
bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and return a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
Equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither x1 nor x2 are
nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and return a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and return a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
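Integer input yields float parts, as noted above:
>>> np.modf(3)
(0.0, 3.0)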
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : {array_like, scalar}
Input array.
Returns
-------
y : {ndarray, scalar}
Returned array or scalar `y=-x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
Equivalent to ``np.ones(a.shape, dtype=a.dtype)``.
Please refer to the documentation for `zeros_like`.
See Also
--------
zeros_like
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from the second array, element-wise.
Raise each base in `x1` to the power of the exponents in `x2`. This
requires that `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
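As the note above warns, integer input truncates toward zero:
>>> np.reciprocal(2)
0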
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like, bool
Output array.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
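As noted above, the spacing of non-finite values is nan:
>>> np.spacing(np.inf)
nan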
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.zeros(1)
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The square root of each element in this array is calculated.
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the square-root of
each element in `x`. If any element in `x`
is complex, a complex array is returned. If all of the elements
of `x` are real, so is `y`, with negative elements returning ``nan``.
See Also
--------
numpy.lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
`sqrt` has a branch cut ``[-inf, 0)`` and is continuous from above on it.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.zeros(1)
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.zeros(1)
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of Python's traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2, at which point
``//`` and ``/`` were equivalent operators for integers. The default floor
division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/reshape/reshape.py | 1 | 35967 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, text_type, zip
from pandas import compat
from functools import partial
import itertools
import numpy as np
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion, is_sparse, is_object_dtype)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notna
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.index import Index, MultiIndex
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
values : ndarray
Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
value_columns : Index, optional
Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
datetimelike, etc. For integer types, by default data will be converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame or SparseDataFrame will be used.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
self.is_categorical = None
self.is_sparse = is_sparse(values)
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
elif self.is_sparse:
# XXX: Makes SparseArray *dense*, but it's supposedly
# a single column at a time, so it's "doable"
values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
if self.is_sparse:
self.constructor = SparseDataFrame
else:
self.constructor = DataFrame
else:
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index.remove_unused_levels()
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {level}. The index "
"names are not unique.".format(level=level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
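# Move the level being unstacked to the end of the sort key, then
# group-sort so that all rows destined for the same output row become
# contiguous in ``self.sorted_values``.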
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
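# Flat position of each observation in the unstacked result: group index
# (output row) times the stride, plus the observation's label within the
# unstacked level, shifted by one when a NaN label lifts the level.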
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new labels to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_labels.append(np.tile(repeater, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), lev._na_value)
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
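# A rough sketch of what follows: _unstack_multiple unstacks several index
# levels in one call. For a Series `s` whose MultiIndex has (hypothetical)
# level names 'second' and 'third', ``s.unstack(['second', 'third'])``
# arrives here with ``clocs=['second', 'third']`` and yields columns
# spanning the product of those two levels' values.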
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
if rlocs == []:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name='__placeholder__')
else:
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
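Examples
--------
A minimal sketch of the reshape; the column names 'date', 'var' and
'val' are illustrative only:
>>> import pandas as pd
>>> df = pd.DataFrame({'date': [1, 1, 2, 2],
...                    'var': ['a', 'b', 'a', 'b'],
...                    'val': [10, 20, 30, 40]})
>>> df.pivot(index='date', columns='var', values='val')
var    a   b
date
1     10  20
2     30  40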
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
else:
if index is None:
index = self.index
else:
index = self[index]
index = MultiIndex.from_arrays([index, self[columns]])
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
indexed = self._constructor(self[values].values, index=index,
columns=values)
else:
indexed = self._constructor_sliced(self[values].values,
index=index)
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
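Examples
--------
A minimal sketch with made-up data:
>>> import numpy as np
>>> pivot_simple(np.array(['one', 'one', 'two', 'two']),
...              np.array(['a', 'b', 'a', 'b']),
...              np.array([1., 2., 3., 4.]))
       a    b
one  1.0  2.0
two  3.0  4.0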
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sort_index(level=0)
return series.unstack()
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
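# `unstack` (below) is the module-level dispatcher: it normalizes a
# one-element list/tuple `level`, sends multiple levels to
# _unstack_multiple, a MultiIndex-ed DataFrame to _unstack_frame, a
# flat-indexed DataFrame through transpose-then-stack, and a Series
# through _Unstacker directly.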
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value,
constructor=obj._constructor_expanddim)
return unstacker.get_result()
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
blocks = obj._data.unstack(unstacker)
return obj._constructor(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value,
constructor=obj._constructor)
return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
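Examples
--------
A minimal sketch (illustrative data):
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
>>> stack(df)
x  a    1
   b    3
y  a    2
   b    4
dtype: int64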
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {level}. The column "
"names are not unique.".format(level=level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
        # Possible optimization: get_loc almost always returns a slice here,
        # but when the columns are unsorted it can return a boolean indexer
        # instead, which is why both cases are handled below.
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
    # A mask-based implementation (as in `stack`) could avoid materializing
    # rows that are dropped again below, but the saving would be small.
if dropna:
result = result.dropna(axis=0, how='all')
return result
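# Shape sketch for `_stack_multi_columns` (labels illustrative): columns
# [(A, x), (A, y), (B, x), (B, y)] with the last level selected collapse to
# columns [A, B], while x/y move into a new innermost index level, turning
# an (N, 4) frame into a (2 * N, 2) one.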
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False, dtype=None):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
        String to prepend to the generated dummy column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
        If appending a prefix, the separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
dummy_na : bool, default False
        Add a column to indicate NaNs; if False, NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
    >>> import numpy as np
    >>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
See Also
--------
Series.str.get_dummies
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
dtypes_to_encode = ['object', 'category']
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(
include=dtypes_to_encode)
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = \
len_msg.format(name=name, len_item=len(item),
len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False, dtype=None):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index, default_fill_value=0)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type)
else '{prefix}{sep}{level}' for v in levels]
dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v)
for dummy_str, v in zip(dummy_strs, levels)]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=dtype)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
dtype=dtype)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
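# The dense branch above encodes via an identity-row lookup; a standalone
# sketch using numpy only (codes are illustrative):
#   >>> import numpy as np
#   >>> codes = np.array([0, 1, 2, 0, -1])              # -1 marks NaN
#   >>> np.eye(3, dtype=np.uint8).take(codes, axis=0)
#   array([[1, 0, 0],
#          [0, 1, 0],
#          [0, 0, 1],
#          [1, 0, 0],
#          [0, 0, 1]], dtype=uint8)
# `take` wraps the -1 around to the last row, which is why rows with
# codes == -1 are zeroed afterwards when dummy_na is False.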
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
| mit |
mhvk/astropy | astropy/modeling/functional_models.py | 2 | 90573 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from astropy.utils.decorators import deprecated
from .core import (Fittable1DModel, Fittable2DModel)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D',
'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D',
'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale',
'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Trapezoid1D',
'TrapezoidDisk2D', 'Ring2D', 'Voigt1D', 'KingProjectedAnalytic1D',
'Exponential1D', 'Logarithmic1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian")
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian")
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
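    # Sanity check for the conversion above (value approximate):
    #   Gaussian1D(mean=0, stddev=1).fwhm  ->  2.3548...
    # i.e. FWHM = 2 * sqrt(2 * ln 2) * stddev.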
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mean': inputs_unit[self.inputs[0]],
'stddev': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
Rotation angle (value in radians). The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian")
y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian")
x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)")
y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)")
theta = Parameter(default=0.0, description="Rotation angle [in radians] (Optional parameter)")
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
            # Compute the principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
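    # Usage sketch for the cov_matrix branch in __init__ (numbers
    # illustrative): a covariance [[2, 1], [1, 2]] has eigenvalues 3 and 1
    # with principal axis along (1, 1), so
    #   Gaussian2D(cov_matrix=[[2, 1], [1, 2]])
    # is built with x_stddev = sqrt(3), y_stddev = 1 and theta = pi / 4
    # (up to numpy's eigenvector ordering).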
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit,
self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_mean': inputs_unit[self.inputs[0]],
'y_mean': inputs_unit[self.inputs[0]],
'x_stddev': inputs_unit[self.inputs[0]],
'y_stddev': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'offset': outputs_unit[self.outputs[0]]}
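# Round-trip sketch for Shift.inverse (numbers illustrative): Shift(2) maps
# 3 -> 5, and the inverse copy carries offset -2, so Shift(2).inverse(5)
# returns 3.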
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='Redshift', default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.z) for x in self.bounding_box)
return inv
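# The inverse redshift above follows from requiring (1 + z')(1 + z) = 1,
# i.e. z' = 1 / (1 + z) - 1; e.g. z = 1 (factor 2) inverts to z' = -0.5
# (factor 0.5), recovering the identity.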
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except (ValueError, ImportError):
                raise ImportError('Sersic1D model requires scipy.')
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
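    # Worked value for the half-light relation above (requires scipy; last
    # digits approximate):
    #   >>> from scipy.special import gammaincinv
    #   >>> gammaincinv(2 * 4, 0.5)   # b_n for n = 4 (de Vaucouleurs)
    #   7.669...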
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_eff': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
        # quantity-ness from the argument in this case (another option would
        # be to multiply by u.rad, but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': inputs_unit[self.inputs[0]] ** -1,
'amplitude': outputs_unit[self.outputs[0]]}
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
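    # The inverse above solves y = a * x + b for x, i.e. x = y / a - b / a;
    # e.g. Linear1D(slope=2, intercept=1) maps 3 -> 7, and its inverse
    # (slope 0.5, intercept -0.5) maps 7 back to 3.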
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit[self.outputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]]}
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit['z'],
'slope_x': outputs_unit['z'] / inputs_unit['x'],
'slope_y': outputs_unit['z'] / inputs_unit['y']}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
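    # Enclosed-area check for the default factor (approximate): a Lorentzian
    # with HWHM gamma = fwhm / 2 has fraction (2 / pi) * arctan(dx / gamma)
    # of its area within +/- dx of the peak, so factor = 25 (dx = 50 * gamma)
    # encloses (2 / pi) * arctan(50) ~ 0.987, the ~99% quoted above.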
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
method : str, optional
Algorithm for computing the complex error function; one of
        'Humlicek2' (default; fast and generally accurate to a relative
        error better than 3e-5) or 'Scipy' (alias 'wofz'; requires
        ``scipy``, almost as fast and the reference in accuracy).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided
consistently with compatible units or as unitless numbers.
Voigt function is calculated as real part of the complex error function computed from either
Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following
Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or
`~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0,
description="Position of the peak")
amplitude_L = Parameter(default=1, # noqa: N815
description="The Lorentzian amplitude")
fwhm_L = Parameter(default=2/np.pi, # noqa: N815
description="The Lorentzian full width at half maximum")
fwhm_G = Parameter(default=np.log(2), # noqa: N815
description="The Gaussian full width at half maximum")
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(self, x_0=x_0.default, amplitude_L=amplitude_L.default, # noqa: N803
fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method='humlicek2', # noqa: N803
**kwargs):
if str(method).lower() in ('wofz', 'scipy'):
try:
from scipy.special import wofz
except (ValueError, ImportError) as err:
raise ImportError(f'Voigt1D method {method} requires scipy: {err}.') from err
self._faddeeva = wofz
elif str(method).lower() == 'humlicek2':
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(f'Not a valid method for Voigt1D Faddeeva function: {method}.')
self.method = self._faddeeva.__name__
super().__init__(x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`."""
if (z.shape == self._last_z.shape and
np.allclose(z, self._last_z, rtol=1.e-14, atol=1.e-15)):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
        # The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2;
        # for the legacy definition we multiply by np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""Derivative of the one dimensional Voigt function with respect to parameters."""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm_L': inputs_unit[self.inputs[0]],
'fwhm_G': inputs_unit[self.inputs[0]],
'amplitude_L': outputs_unit[self.outputs[0]]}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z) for z = x + iy combining Humlicek's rational approximations:
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
AA = np.array([+46236.3358828121, -147726.58393079657j, # noqa: N806
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
                       -0.7616559377907136, 0.5641895835476449j])  # 1j/sqrt(pi) to the 12th digit
bb = np.array([+7918.06640624997, 0.0,
-126689.0625, 0.0,
+295607.8125, 0.0,
-236486.25, 0.0,
+84459.375, 0.0,
-15015.0, 0.0,
+1365.0, 0.0,
-60.0, 0.0,
+1.0])
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz*(zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
denom = (((((((ZZ + bb[14])*ZZ + bb[12])*ZZ + bb[10])*ZZ+bb[8])*ZZ + bb[6])*ZZ +
bb[4])*ZZ + bb[2])*ZZ + bb[0]
np.place(w, mask, numer / denom)
return w
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse")
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(default=0, description="The rotation angle in radians of the semimajor axis (Positive - counterclockwise)")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'a': inputs_unit[self.inputs[0]],
'b': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Value of disk function")
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
        Outer radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1, description="Value of the disk function")
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_in': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A")
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Amplitude")
x_0 = Parameter(default=0, description="X position of the center of the box function")
y_0 = Parameter(default=0, description="Y position of the center of the box function")
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[1]],
'x_width': inputs_unit[self.inputs[0]],
'y_width': inputs_unit[self.inputs[1]],
'amplitude': outputs_unit[self.outputs[0]]}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
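Examples
--------
A minimal sketch tracing the radial profile along the x axis: the
profile is flat out to ``R_0`` and then falls off linearly with
``slope`` (only parameters documented above are used):
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import TrapezoidDisk2D
disk = TrapezoidDisk2D(amplitude=2, x_0=0, y_0=0, R_0=2, slope=1)
x = np.linspace(-5, 5, 201)
plt.figure()
plt.plot(x, disk(x, np.zeros_like(x)), lw=2)
plt.show()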
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(default=1, description="Slope of tails of trapezoid in x direction")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
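Examples
--------
A minimal doctest-style sketch: at the peak the model equals
``amplitude`` (the formula above reduces to :math:`A` at
:math:`x = x_0, y = y_0`):
>>> from astropy.modeling.models import RickerWavelet2D
>>> hat = RickerWavelet2D(amplitude=2, x_0=0, y_0=0, sigma=1)
>>> float(hat(0, 0))
2.0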
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is the radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
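Examples
--------
A minimal sketch rendering the diffraction pattern on a grid (requires
scipy, as the ``evaluate`` implementation below notes):
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import AiryDisk2D
y, x = np.mgrid[-2:2:0.02, -2:2:0.02]
airy = AiryDisk2D(amplitude=1, x_0=0, y_0=0, radius=1)
plt.figure()
plt.imshow(airy(x, y), origin='lower', extent=(-2, 2, -2, 2))
plt.show()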
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Airy function")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(default=1,
description="The radius of the Airy disk (radius of first zero crossing)")
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
except ImportError:
raise ImportError('AiryDisk2D model requires scipy.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'radius': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
fac = (1 + (x - x_0) ** 2 / gamma ** 2)
d_A = fac ** (-alpha)
d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2))
d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A /
(fac * gamma ** 3))
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
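Examples
--------
A minimal doctest-style sketch: the ``fwhm`` property below implements
the closed form :math:`2 |\\gamma| \\sqrt{2^{1/\\alpha} - 1}`:
>>> from astropy.modeling.models import Moffat2D
>>> m = Moffat2D(amplitude=1, x_0=0, y_0=0, gamma=2, alpha=1)
>>> float(m.fwhm)
4.0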
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(default=0, description="X position of the maximum of the Moffat model")
y_0 = Parameter(default=0, description="Y position of the maximum of the Moffat model")
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = (2 * amplitude * alpha * d_A * rr_gg /
(gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(default=0, description="Rotation angle in radians (counterclockwise-positive)")
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ImportError:
raise ImportError('Sersic2D model requires scipy.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_eff': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is only an approximation of the
full model, and the parameters derived from it should be taken with caution.
It usually works for models with a concentration parameter c = log10(r_t/r_c) < 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label='c ~ {:0.2f}'.format(mod.concentration))
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor")
r_core = Parameter(default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius")
r_tide = Parameter(default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius")
@property
def concentration(self):
"""Concentration parameter of the king model"""
return np.log10(np.abs(self.r_tide/self.r_core))
@staticmethod
def evaluate(x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.
return result
@staticmethod
def fit_deriv(x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
d_r_core = 2 * amplitude * r_core ** 2 * (r_core/(r_core ** 2 + r_tide ** 2) ** (3/2) -
r_core/(r_core ** 2 + x ** 2) ** (3/2)) * \
(1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) + \
2 * amplitude * r_core * (1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)) ** 2
d_r_tide = (2 * amplitude * r_core ** 2 * r_tide *
(1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)))/(r_core ** 2 + r_tide ** 2) ** (3/2)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
d_amplitude[bounds] = d_amplitude[bounds]*0
d_r_core[bounds] = d_r_core[bounds]*0
d_r_tide[bounds] = d_r_tide[bounds]*0
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_core': inputs_unit[self.inputs[0]],
'r_tide': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Exponential1D, Gaussian1D
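Examples
--------
A minimal doctest-style sketch: ``inverse`` below returns the matching
``Exponential1D``, so a round trip recovers the input:
>>> import numpy as np
>>> from astropy.modeling.models import Logarithmic1D
>>> m = Logarithmic1D(amplitude=2, tau=3)
>>> bool(np.isclose(float(m.inverse(m(5.0))), 5.0))
True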
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if val == 0:
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Logarithmic1D, Gaussian1D
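Examples
--------
A minimal doctest-style sketch: ``tau`` sets the e-folding scale, so
evaluating at ``x = tau`` gives ``amplitude * e``:
>>> import numpy as np
>>> from astropy.modeling.models import Exponential1D
>>> m = Exponential1D(amplitude=1, tau=2)
>>> bool(np.isclose(float(m(2.0)), np.e))
True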
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
"""Derivative with respect to parameters."""
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
"""tau cannot be 0."""
if val == 0:
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
@deprecated('4.0', alternative='RickerWavelet1D')
class MexicanHat1D(RickerWavelet1D):
""" Deprecated."""
@deprecated('4.0', alternative='RickerWavelet2D')
class MexicanHat2D(RickerWavelet2D):
""" Deprecated."""
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/model_selection/tests/test_split.py | 7 | 41116 | """Test the split module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
from scipy import stats
from scipy.misc import comb
from itertools import combinations
from sklearn.utils.fixes import combinations_with_replacement
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import _num_samples
from sklearn.utils.mocking import MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import TimeSeriesSplit
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import check_cv
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection._split import _validate_shuffle_split
from sklearn.model_selection._split import _CVIterableWrapper
from sklearn.model_selection._split import _build_repr
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.svm import SVC
X = np.ones(10)
y = np.arange(10) // 2
P_sparse = coo_matrix(np.eye(5))
digits = load_digits()
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
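# A minimal sketch (an editor's illustration, not one of the original
# tests): MockClassifier ignores the data and scores 1 / (1 + |a|), which
# makes it convenient for smoke-testing cross_val_score plumbing such as
# the ``fit_params`` pass-through. The leading underscore keeps it out of
# test collection.
def _demo_mock_classifier_with_cross_val_score():
clf = MockClassifier(a=2)
scores = cross_val_score(clf, X, y, cv=KFold(5),
fit_params={'dummy_int': 42})
assert_array_almost_equal(scores, [1. / 3] * 5)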
@ignore_warnings
def test_cross_validator_with_default_params():
n_samples = 4
n_unique_groups = 4
n_splits = 2
p = 2
n_shuffle_splits = 10 # (the default value)
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
X_1d = np.array([1, 2, 3, 4])
y = np.array([1, 1, 2, 2])
groups = np.array([1, 2, 3, 4])
loo = LeaveOneOut()
lpo = LeavePOut(p)
kf = KFold(n_splits)
skf = StratifiedKFold(n_splits)
lolo = LeaveOneGroupOut()
lopo = LeavePGroupsOut(p)
ss = ShuffleSplit(random_state=0)
ps = PredefinedSplit([1, 1, 2, 2]) # n_splits = number of unique folds = 2
loo_repr = "LeaveOneOut()"
lpo_repr = "LeavePOut(p=2)"
kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
lolo_repr = "LeaveOneGroupOut()"
lopo_repr = "LeavePGroupsOut(n_groups=2)"
ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, test_size=0.1, "
"train_size=None)")
ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
n_unique_groups, comb(n_unique_groups, p),
n_shuffle_splits, 2]
for i, (cv, cv_repr) in enumerate(zip(
[loo, lpo, kf, skf, lolo, lopo, ss, ps],
[loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
ss_repr, ps_repr])):
# Test if get_n_splits works correctly
assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))
# Test if the cross-validator works as expected even if
# the data is 1d
np.testing.assert_equal(list(cv.split(X, y, groups)),
list(cv.split(X_1d, y, groups)))
# Test that train, test indices returned are integers
for train, test in cv.split(X, y, groups):
assert_equal(np.asarray(train).dtype.kind, 'i')
assert_equal(np.asarray(test).dtype.kind, 'i')
# Test if the repr works without any errors
assert_equal(cv_repr, repr(cv))
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, X, y, groups, expected_n_splits=None):
n_samples = _num_samples(X)
# Check that all the samples appear at least once in a test fold
if expected_n_splits is not None:
assert_equal(cv.get_n_splits(X, y, groups), expected_n_splits)
else:
expected_n_splits = cv.get_n_splits(X, y, groups)
collected_test_samples = set()
iterations = 0
for train, test in cv.split(X, y, groups):
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_splits)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
X1 = np.array([[1, 2], [3, 4], [5, 6]])
X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, next, KFold(4).split(X1))
# Check that a warning is raised if the least populated class has too few
# members.
y = np.array([3, 3, -1, -1, 3])
skf_3 = StratifiedKFold(3)
assert_warns_message(Warning, "The least populated class",
next, skf_3.split(X2, y))
# Check that despite the warning the folds are still computed, even
# though not all classes are necessarily represented on each side of
# the split at each iteration
with warnings.catch_warnings():
warnings.simplefilter("ignore")
check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
# Check that errors are raised if all n_groups for individual
# classes are less than n_splits.
y = np.array([3, 3, -1, -1, 2])
assert_raises(ValueError, next, skf_3.split(X2, y))
# Error when number of folds is <= 1
assert_raises(ValueError, KFold, 0)
assert_raises(ValueError, KFold, 1)
error_string = ("k-fold cross-validation requires at least one"
" train/test split")
assert_raise_message(ValueError, error_string,
StratifiedKFold, 0)
assert_raise_message(ValueError, error_string,
StratifiedKFold, 1)
# When n_splits is not integer:
assert_raises(ValueError, KFold, 1.5)
assert_raises(ValueError, KFold, 2.0)
assert_raises(ValueError, StratifiedKFold, 1.5)
assert_raises(ValueError, StratifiedKFold, 2.0)
# When shuffle is not a bool:
assert_raises(TypeError, KFold, n_splits=4, shuffle=None)
def test_kfold_indices():
# Check all indices are returned in the test folds
X1 = np.ones(18)
kf = KFold(3)
check_cv_coverage(kf, X1, y=None, groups=None, expected_n_splits=3)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
X2 = np.ones(17)
kf = KFold(3)
check_cv_coverage(kf, X2, y=None, groups=None, expected_n_splits=3)
# Check if get_n_splits returns the number of folds
assert_equal(5, KFold(5).get_n_splits(X2))
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
splits = KFold(2).split(X2[:-1])
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = KFold(2).split(X2)
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
X, y = np.ones(4), [1, 1, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
splits = StratifiedKFold(2).split(X, y)
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
# Check if get_n_splits returns the number of folds
assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in (False, True):
for train, test in StratifiedKFold(5, shuffle=shuffle).split(X, y):
assert_almost_equal(np.sum(y[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(y[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(y[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(y[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(y[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(y[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for i in range(11, 17):
kf = KFold(5).split(X=np.ones(i))
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_stratifiedkfold_balance():
# Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
X = np.ones(17)
y = [0] * 3 + [1] * 14
for shuffle in (True, False):
cv = StratifiedKFold(3, shuffle=shuffle)
for i in range(11, 17):
skf = cv.split(X[:i], y[:i])
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), i)
def test_shuffle_kfold():
# Check the indices are shuffled properly
kf = KFold(3)
kf2 = KFold(3, shuffle=True, random_state=0)
kf3 = KFold(3, shuffle=True, random_state=1)
X = np.ones(300)
all_folds = np.zeros(300)
for (tr1, te1), (tr2, te2), (tr3, te3) in zip(
kf.split(X), kf2.split(X), kf3.split(X)):
for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
# Assert that there is no complete overlap
assert_not_equal(len(np.intersect1d(tr_a, tr_b)), len(tr1))
# Set all test indices in successive iterations of kf2 to 1
all_folds[te2] = 1
# Check that all indices are returned in the different test folds
assert_equal(sum(all_folds), 300)
def test_shuffle_kfold_stratifiedkfold_reproducibility():
# Check that when shuffle is True, multiple split calls produce the
# same split when random_state is set
X = np.ones(15) # Divisible by 3
y = [0] * 7 + [1] * 8
X2 = np.ones(16) # Not divisible by 3
y2 = [0] * 8 + [1] * 8
kf = KFold(3, shuffle=True, random_state=0)
skf = StratifiedKFold(3, shuffle=True, random_state=0)
for cv in (kf, skf):
np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))
kf = KFold(3, shuffle=True)
skf = StratifiedKFold(3, shuffle=True)
for cv in (kf, skf):
for data in zip((X, X2), (y, y2)):
try:
np.testing.assert_equal(list(cv.split(*data)),
list(cv.split(*data)))
except AssertionError:
pass
else:
raise AssertionError("The splits for data, %s, are the same even "
"when random state is not set" % data)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
X_40 = np.ones(40)
y = [0] * 20 + [1] * 20
kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
for (_, test0), (_, test1) in zip(kf0.split(X_40, y),
kf1.split(X_40, y)):
assert_not_equal(set(test0), set(test1))
check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.93) than the
# non-shuffling variant (around 0.81).
X, y = digits.data[:600], digits.target[:600]
model = SVC(C=10, gamma=0.005)
n_splits = 3
cv = KFold(n_splits=n_splits, shuffle=False)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.92, mean_score)
assert_greater(mean_score, 0.80)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = KFold(n_splits, shuffle=True, random_state=0)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
cv = KFold(n_splits, shuffle=True, random_state=1)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.92)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = StratifiedKFold(n_splits)
mean_score = cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.93, mean_score)
assert_greater(mean_score, 0.80)
def test_shuffle_split():
ss1 = ShuffleSplit(test_size=0.2, random_state=0).split(X)
ss2 = ShuffleSplit(test_size=2, random_state=0).split(X)
ss3 = ShuffleSplit(test_size=np.int32(2), random_state=0).split(X)
for typ in six.integer_types:
ss4 = ShuffleSplit(test_size=typ(2), random_state=0).split(X)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
X = np.arange(7)
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.2).split(X, y))
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, next, StratifiedShuffleSplit(3, 2).split(X, y))
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 3, 2).split(X, y))
X = np.arange(9)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, StratifiedShuffleSplit, 3, 0.5, 0.6)
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 8, 0.6).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(3, 0.6, 8).split(X, y))
# Train size or test size too small
assert_raises(ValueError, next,
StratifiedShuffleSplit(train_size=2).split(X, y))
assert_raises(ValueError, next,
StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
test_size = 5
train_size = 10
sss = StratifiedShuffleSplit(6, test_size=test_size, train_size=train_size,
random_state=0).split(np.ones(len(y)), y)
for train, test in sss:
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50),
np.concatenate([[i] * (100 + i) for i in range(11)])
]
for y in ys:
sss = StratifiedShuffleSplit(6, test_size=0.33,
random_state=0).split(np.ones(len(y)), y)
# this is how test-size is computed internally
# in _validate_shuffle_split
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit: indices are drawn with an
# equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
assert_true(prob > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
groups = np.array((n_samples // 2) * [0, 1])
splits = StratifiedShuffleSplit(n_splits=n_splits,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits_actual = 0
for train, test in splits.split(X=np.ones(n_samples), y=groups):
n_splits_actual += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits_actual, n_splits)
n_train, n_test = _validate_shuffle_split(
n_samples, test_size=1. / n_folds, train_size=1. - (1. / n_folds))
assert_equal(len(train), n_train)
assert_equal(len(test), n_test)
assert_equal(len(set(train).intersection(test)), 0)
group_counts = np.unique(groups)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(n_train + n_test, len(groups))
assert_equal(len(group_counts), 2)
ex_test_p = float(n_test) / n_samples
ex_train_p = float(n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
y = [0, 1, 2, 3] * 3 + [4, 5] * 5
X = np.ones_like(y)
sss = StratifiedShuffleSplit(n_splits=1,
test_size=0.5, random_state=0)
train, test = next(iter(sss.split(X=X, y=y)))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(KFold(5, shuffle=True).split(X)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = PredefinedSplit(folds)
# n_splits is simply the no of unique folds
assert_equal(len(np.unique(folds)), ps.get_n_splits())
for train_ind, test_ind in ps.split():
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_group_shuffle_split():
groups = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4])]
for l in groups:
X = y = np.ones(len(l))
n_splits = 6
test_size = 1./3
slo = GroupShuffleSplit(n_splits, test_size=test_size, random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(slo.get_n_splits(X, y, groups=l), n_splits)
l_unique = np.unique(l)
for train, test in slo.split(X, y, groups=l):
# First test: no train group is in the test set and vice versa
l_train_unique = np.unique(l[train])
l_test_unique = np.unique(l[test])
assert_false(np.any(np.in1d(l[train], l_test_unique)))
assert_false(np.any(np.in1d(l[test], l_train_unique)))
# Second test: train and test add up to all the data
assert_equal(l[train].size + l[test].size, l.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test:
# unique train and test groups are correct, +- 1 for rounding error
assert_true(abs(len(l_test_unique) -
round(test_size * len(l_unique))) <= 1)
assert_true(abs(len(l_train_unique) -
round((1.0 - test_size) * len(l_unique))) <= 1)
def test_leave_group_out_changing_groups():
# Check that LeaveOneGroupOut and LeavePGroupsOut work normally if
# the groups variable is changed before calling split
groups = np.array([0, 1, 2, 1, 1, 2, 0, 0])
X = np.ones(len(groups))
groups_changing = np.array(groups, copy=True)
lolo = LeaveOneGroupOut().split(X, groups=groups)
lolo_changing = LeaveOneGroupOut().split(X, groups=groups)
lplo = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
lplo_changing = LeavePGroupsOut(n_groups=2).split(X, groups=groups)
groups_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
# n_splits = no of 2 (p) group combinations of the unique groups = 3C2 = 3
assert_equal(3, LeavePGroupsOut(n_groups=2).get_n_splits(X, y, groups))
# n_splits = no of unique groups (C(uniq_lbls, 1) = n_unique_groups)
assert_equal(3, LeaveOneGroupOut().get_n_splits(X, y, groups))
def test_train_test_split_errors():
assert_raises(ValueError, train_test_split)
assert_raises(ValueError, train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# don't convert lists to anything else by default
split = train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = train_test_split(y, test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
@ignore_warnings
def train_test_split_pandas():
# check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_sparse():
# check that train_test_split converts scipy sparse matrices
# to csr, as stated in the documentation
X = np.arange(100).reshape((10, 10))
sparse_types = [csr_matrix, csc_matrix, coo_matrix]
for InputFeatureType in sparse_types:
X_s = InputFeatureType(X)
X_train, X_test = train_test_split(X_s)
assert_true(isinstance(X_train, csr_matrix))
assert_true(isinstance(X_test, csr_matrix))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = train_test_split(X_df)
def test_shufflesplit_errors():
# When the {test|train}_size is a float/invalid, error is raised at init
assert_raises(ValueError, ShuffleSplit, test_size=None, train_size=None)
assert_raises(ValueError, ShuffleSplit, test_size=2.0)
assert_raises(ValueError, ShuffleSplit, test_size=1.0)
assert_raises(ValueError, ShuffleSplit, test_size=0.1, train_size=0.95)
assert_raises(ValueError, ShuffleSplit, train_size=1j)
# When the {test|train}_size is an int, validation is based on the input X
# and happens at split(...)
assert_raises(ValueError, next, ShuffleSplit(test_size=11).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=10).split(X))
assert_raises(ValueError, next, ShuffleSplit(test_size=8,
train_size=3).split(X))
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = ShuffleSplit(random_state=21)
assert_array_equal(list(a for a, b in ss.split(X)),
list(a for a, b in ss.split(X)))
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
train_test_split(X, y, test_size=0.2, random_state=42)
def test_check_cv():
X = np.ones(9)
cv = check_cv(3, classifier=False)
# Use numpy.testing.assert_equal which recursively compares
# lists of lists
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = check_cv(3, y_binary, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_binary)),
list(cv.split(X, y_binary)))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = check_cv(3, y_multiclass, classifier=True)
np.testing.assert_equal(list(StratifiedKFold(3).split(X, y_multiclass)),
list(cv.split(X, y_multiclass)))
X = np.ones(5)
y_multilabel = np.array([[0, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1],
[1, 1, 0, 1], [0, 0, 1, 0]])
cv = check_cv(3, y_multilabel, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = check_cv(3, y_multioutput, classifier=True)
np.testing.assert_equal(list(KFold(3).split(X)), list(cv.split(X)))
# Check if the old style classes are wrapped to have a split method
X = np.ones(9)
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv1 = check_cv(3, y_multiclass, classifier=True)
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv2 = check_cv(OldSKF(y_multiclass, n_folds=3))
np.testing.assert_equal(list(cv1.split(X, y_multiclass)),
list(cv2.split()))
assert_raises(ValueError, check_cv, cv="lolo")
def test_cv_iterable_wrapper():
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
with warnings.catch_warnings(record=True):
from sklearn.cross_validation import StratifiedKFold as OldSKF
cv = OldSKF(y_multiclass, n_folds=3)
wrapped_old_skf = _CVIterableWrapper(cv)
# Check if split works correctly
np.testing.assert_equal(list(cv), list(wrapped_old_skf.split()))
# Check if get_n_splits works correctly
assert_equal(len(cv), wrapped_old_skf.get_n_splits())
def test_group_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_groups = 15
n_samples = 1000
n_splits = 5
X = y = np.ones(n_samples)
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
groups = rng.randint(0, n_groups, n_samples)
ideal_n_groups_per_fold = n_samples // n_splits
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
lkf = GroupKFold(n_splits=n_splits)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Construct the test data
groups = np.array(['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David',
'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'])
n_groups = len(np.unique(groups))
n_samples = len(groups)
n_splits = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
ideal_n_groups_per_fold = n_samples // n_splits
X = y = np.ones(n_samples)
# Get the test fold indices from the test set indices of each fold
folds = np.zeros(n_samples)
for i, (_, test) in enumerate(lkf.split(X, y, groups)):
folds[test] = i
# Check that folds have approximately the same size
assert_equal(len(folds), len(groups))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_groups_per_fold))
# Check that each group appears only in 1 fold
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for group in np.unique(groups):
assert_equal(len(np.unique(folds[groups == group])), 1)
# Check that no group is on both sides of the split
groups = np.asarray(groups, dtype=object)
for train, test in lkf.split(X, y, groups):
assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)
# Should fail if there are more folds than groups
groups = np.array([1, 1, 1, 2, 2])
X = y = np.ones(len(groups))
assert_raises_regexp(ValueError, "Cannot have number of splits.*greater",
next, GroupKFold(n_splits=3).split(X, y, groups))
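# Illustrative sketch (added for clarity, not in the original file): the core
# GroupKFold guarantee checked above is that a group id never appears on both
# sides of the same split.
def demo_group_kfold_disjoint_groups():
    groups = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    X = y = np.ones(len(groups))
    for train, test in GroupKFold(n_splits=4).split(X, y, groups):
        assert_equal(len(np.intersect1d(groups[train], groups[test])), 0)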
def test_time_series_cv():
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
# Should fail if there are more folds than samples
assert_raises_regexp(ValueError, "Cannot have number of folds.*greater",
next,
TimeSeriesSplit(n_splits=7).split(X))
tscv = TimeSeriesSplit(2)
# Manually check that Time Series CV preserves the data
# ordering on toy datasets
splits = tscv.split(X[:-1])
train, test = next(splits)
assert_array_equal(train, [0, 1])
assert_array_equal(test, [2, 3])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3])
assert_array_equal(test, [4, 5])
splits = TimeSeriesSplit(2).split(X)
train, test = next(splits)
assert_array_equal(train, [0, 1, 2])
assert_array_equal(test, [3, 4])
train, test = next(splits)
assert_array_equal(train, [0, 1, 2, 3, 4])
assert_array_equal(test, [5, 6])
# Check get_n_splits returns the correct number of splits
splits = TimeSeriesSplit(2).split(X)
n_splits_actual = len(list(splits))
assert_equal(n_splits_actual, tscv.get_n_splits())
assert_equal(n_splits_actual, 2)
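# Hedged sketch: TimeSeriesSplit uses an expanding window, so each training
# set is a prefix of the samples and each test set is the contiguous block
# that follows it; indices never leak from the future into the past.
def demo_time_series_no_future_leakage():
    X = np.arange(6).reshape(-1, 1)
    for train, test in TimeSeriesSplit(n_splits=2).split(X):
        assert_true(train.max() < test.min())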
def test_nested_cv():
# Test if nested cross validation works with different combinations of cv
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 5, 15)
cvs = [LeaveOneGroupOut(), LeaveOneOut(), GroupKFold(), StratifiedKFold(),
StratifiedShuffleSplit(n_splits=3, random_state=0)]
for inner_cv, outer_cv in combinations_with_replacement(cvs, 2):
gs = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]},
cv=inner_cv)
cross_val_score(gs, X=X, y=y, groups=groups, cv=outer_cv,
fit_params={'groups': groups})
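# Minimal nested-CV sketch (illustrative; names mirror the test above): the
# inner GridSearchCV tunes alpha on each outer training fold, and the outer
# cross_val_score evaluates the tuned model on the matching held-out fold,
# keeping model selection separate from model assessment.
def demo_nested_cv():
    X, y = make_classification(n_samples=30, n_classes=2, random_state=0)
    inner = GridSearchCV(Ridge(), param_grid={'alpha': [1, .1]}, cv=3)
    return cross_val_score(inner, X=X, y=y, cv=3)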
def test_build_repr():
class MockSplitter:
def __init__(self, a, b=0, c=None):
self.a = a
self.b = b
self.c = c
def __repr__(self):
return _build_repr(self)
assert_equal(repr(MockSplitter(5, 6)), "MockSplitter(a=5, b=6, c=None)")
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/feature_extraction/tests/test_text.py | 39 | 36062 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
    # decode_error defaults to 'strict', so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
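# Hedged usage sketch: the same bytes decode without error once decode_error
# is relaxed; 'ignore' and 'replace' are the documented alternatives to the
# default 'strict'.
def demo_decode_error_ignore():
    text = "J'ai mang\xe9 du kangourou ce midi."
    wa = CountVectorizer(encoding='ascii',
                         decode_error='ignore').build_analyzer()
    return wa(text.encode('utf-8'))  # undecodable bytes are dropped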
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features
    # with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
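# Worked example (added for clarity; the formulas follow the TfidfTransformer
# behavior of this scikit-learn generation, so treat them as an assumption):
# without smoothing idf = ln(n_samples / df) + 1, with smoothing
# idf = ln((1 + n_samples) / (1 + df)) + 1. A term with df == 0 therefore
# triggers the divide-by-zero warning asserted above only in the unsmoothed
# variant, while smoothing keeps every idf finite.
def demo_idf_smoothing_values():
    n_samples, df = 3., 1.
    idf_raw = np.log(n_samples / df) + 1.  # ln(3) + 1 ~ 2.10
    idf_smooth = np.log((1. + n_samples) / (1. + df)) + 1.  # ln(2) + 1 ~ 1.69
    return idf_raw, idf_smooth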
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
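# Worked example: sublinear scaling replaces each nonzero count tf with
# 1 + ln(tf), which is why the values asserted above still grow, but slower
# than the raw counts (1 -> 1.0, 2 -> ~1.69, 3 -> ~2.10).
def demo_sublinear_tf_values():
    counts = np.array([1., 2., 3.])
    return 1. + np.log(counts)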
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding:
        # words that are highly frequent across the complete corpus are
        # likely to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
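# Hedged sketch: HashingVectorizer is stateless (features are hashed rather
# than learned), so transform needs no prior fit and two instances built with
# the same parameters map the same corpus to identical matrices.
def demo_hashing_is_stateless():
    X1 = HashingVectorizer().transform(ALL_FOOD_DOCS)
    X2 = HashingVectorizer().transform(ALL_FOOD_DOCS)
    assert_equal((X1 != X2).nnz, 0)  # no differing entries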
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every candidate converges to a 100% accuracy model,
    # so the tie is broken in favor of the first parameter combination in the
    # grid (the unigram representation)
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every candidate converges to a 100% accuracy model,
    # so the tie is broken in favor of the first parameter combination in the
    # grid (the unigram representation)
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    # non-regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/decomposition/nmf.py | 8 | 45009 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck
# Mathieu Blondel <mathieu@mblondel.org>
# Tom Dupre la Tour
# License: BSD 3 clause
from __future__ import division, print_function
from math import sqrt
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.extmath import safe_min
from ..utils.validation import check_is_fitted, check_non_negative
from ..exceptions import ConvergenceWarning
from .cdnmf_fast import _update_cdnmf_fast
EPSILON = np.finfo(np.float32).eps
INTEGER_TYPES = (numbers.Integral, np.integer)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
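# Small illustrative check (added for clarity, not part of the module API):
# trace_dot computes trace(np.dot(X, Y.T)) as the sum of elementwise
# products, avoiding the explicit matrix product, and norm is the plain
# Euclidean norm built on squared_norm.
def _demo_norm_and_trace_dot():
    rng = np.random.RandomState(0)
    X, Y = rng.randn(3, 4), rng.randn(3, 4)
    assert np.allclose(trace_dot(X, Y), np.trace(np.dot(X, Y.T)))
    assert np.allclose(norm(X.ravel()), np.linalg.norm(X.ravel()))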
def _check_init(A, shape, whom):
A = check_array(A)
if np.shape(A) != shape:
raise ValueError('Array with wrong shape passed to %s. Expected %s, '
'but got %s ' % (whom, shape, np.shape(A)))
check_non_negative(A, whom)
if np.max(A) == 0:
raise ValueError('Array passed to %s is full of zeros.' % whom)
def _beta_divergence(X, W, H, beta, square_root=False):
"""Compute the beta-divergence of X and dot(W, H).
Parameters
----------
X : float or array-like, shape (n_samples, n_features)
W : float or dense array-like, shape (n_samples, n_components)
H : float or dense array-like, shape (n_components, n_features)
beta : float, string in {'frobenius', 'kullback-leibler', 'itakura-saito'}
Parameter of the beta-divergence.
If beta == 2, this is half the Frobenius *squared* norm.
If beta == 1, this is the generalized Kullback-Leibler divergence.
If beta == 0, this is the Itakura-Saito divergence.
Else, this is the general beta-divergence.
square_root : boolean, default False
If True, return np.sqrt(2 * res)
For beta == 2, it corresponds to the Frobenius norm.
Returns
-------
res : float
        Beta divergence of X and np.dot(W, H)
"""
beta = _beta_loss_to_float(beta)
# The method can be called with scalars
if not sp.issparse(X):
X = np.atleast_2d(X)
W = np.atleast_2d(W)
H = np.atleast_2d(H)
# Frobenius norm
if beta == 2:
# Avoid the creation of the dense np.dot(W, H) if X is sparse.
if sp.issparse(X):
norm_X = np.dot(X.data, X.data)
norm_WH = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
res = (norm_X + norm_WH - 2. * cross_prod) / 2.
else:
res = squared_norm(X - np.dot(W, H)) / 2.
if square_root:
return np.sqrt(res * 2)
else:
return res
if sp.issparse(X):
# compute np.dot(W, H) only where X is nonzero
WH_data = _special_sparse_dot(W, H, X).data
X_data = X.data
else:
WH = np.dot(W, H)
WH_data = WH.ravel()
X_data = X.ravel()
# do not affect the zeros: here 0 ** (-1) = 0 and not infinity
WH_data = WH_data[X_data != 0]
X_data = X_data[X_data != 0]
# used to avoid division by zero
WH_data[WH_data == 0] = EPSILON
# generalized Kullback-Leibler divergence
if beta == 1:
# fast and memory efficient computation of np.sum(np.dot(W, H))
sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
# computes np.sum(X * log(X / WH)) only where X is nonzero
div = X_data / WH_data
res = np.dot(X_data, np.log(div))
# add full np.sum(np.dot(W, H)) - np.sum(X)
res += sum_WH - X_data.sum()
# Itakura-Saito divergence
elif beta == 0:
div = X_data / WH_data
res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
# beta-divergence, beta not in (0, 1, 2)
else:
if sp.issparse(X):
# slow loop, but memory efficient computation of :
# np.sum(np.dot(W, H) ** beta)
sum_WH_beta = 0
for i in range(X.shape[1]):
sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
else:
sum_WH_beta = np.sum(WH ** beta)
sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
res = (X_data ** beta).sum() - beta * sum_X_WH
res += sum_WH_beta * (beta - 1)
res /= beta * (beta - 1)
if square_root:
return np.sqrt(2 * res)
else:
return res
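# Illustrative cross-check (a sketch, not public API): for beta == 2 the
# beta-divergence computed above is half the squared Frobenius norm of
# X - WH, which a direct dense computation confirms.
def _demo_beta_divergence_frobenius():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(4, 3))
    W = np.abs(rng.randn(4, 2))
    H = np.abs(rng.randn(2, 3))
    res = _beta_divergence(X, W, H, beta=2)
    assert np.allclose(res, 0.5 * squared_norm(X - np.dot(W, H)))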
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
dot_vals = np.multiply(W[ii, :], H.T[jj, :]).sum(axis=1)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return np.dot(W, H)
def _compute_regularization(alpha, l1_ratio, regularization):
"""Compute L1 and L2 regularization coefficients for W and H"""
alpha_H = 0.
alpha_W = 0.
if regularization in ('both', 'components'):
alpha_H = float(alpha)
if regularization in ('both', 'transformation'):
alpha_W = float(alpha)
l1_reg_W = alpha_W * l1_ratio
l1_reg_H = alpha_H * l1_ratio
l2_reg_W = alpha_W * (1. - l1_ratio)
l2_reg_H = alpha_H * (1. - l1_ratio)
return l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H
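# Worked example (illustrative): with alpha=1.0, l1_ratio=0.5 and
# regularization='both', the penalty is split evenly, so W and H each get
# l1_reg = l2_reg = 0.5.
def _demo_regularization_split():
    return _compute_regularization(1.0, 0.5, 'both')  # (0.5, 0.5, 0.5, 0.5)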
def _check_string_param(solver, regularization, beta_loss, init):
allowed_solver = ('cd', 'mu')
if solver not in allowed_solver:
raise ValueError(
'Invalid solver parameter: got %r instead of one of %r' %
(solver, allowed_solver))
allowed_regularization = ('both', 'components', 'transformation', None)
if regularization not in allowed_regularization:
raise ValueError(
'Invalid regularization parameter: got %r instead of one of %r' %
(regularization, allowed_regularization))
# 'mu' is the only solver that handles other beta losses than 'frobenius'
if solver != 'mu' and beta_loss not in (2, 'frobenius'):
raise ValueError(
'Invalid beta_loss parameter: solver %r does not handle beta_loss'
' = %r' % (solver, beta_loss))
if solver == 'mu' and init == 'nndsvd':
warnings.warn("The multiplicative update ('mu') solver cannot update "
"zeros present in the initialization, and so leads to "
"poorer results when used jointly with init='nndsvd'. "
"You may try init='nndsvda' or init='nndsvdar' instead.",
UserWarning)
beta_loss = _beta_loss_to_float(beta_loss)
return beta_loss
def _beta_loss_to_float(beta_loss):
"""Convert string beta_loss to float"""
allowed_beta_loss = {'frobenius': 2,
'kullback-leibler': 1,
'itakura-saito': 0}
if isinstance(beta_loss, str) and beta_loss in allowed_beta_loss:
beta_loss = allowed_beta_loss[beta_loss]
if not isinstance(beta_loss, numbers.Number):
raise ValueError('Invalid beta_loss parameter: got %r instead '
'of one of %r, or a float.' %
(beta_loss, allowed_beta_loss.keys()))
return beta_loss
def _initialize_nmf(X, n_components, init=None, eps=1e-6,
random_state=None):
"""Algorithms for NMF initialization.
Computes an initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix to be decomposed.
n_components : integer
The number of components desired in the approximation.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise 'random'.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
eps : float
        Truncate all values less than this in the output to zero.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``init`` == 'nndsvdar' or 'random'.
Returns
-------
W : array-like, shape (n_samples, n_components)
Initial guesses for solving X ~= WH
H : array-like, shape (n_components, n_features)
Initial guesses for solving X ~= WH
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
n_samples, n_features = X.shape
if init is None:
if n_components < n_features:
init = 'nndsvd'
else:
init = 'random'
# Random initialization
if init == 'random':
avg = np.sqrt(X.mean() / n_components)
rng = check_random_state(random_state)
H = avg * rng.randn(n_components, n_features)
W = avg * rng.randn(n_samples, n_components)
# we do not write np.abs(H, out=H) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(H, H)
np.abs(W, W)
return W, H
# NNDSVD initialization
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if init == "nndsvd":
pass
elif init == "nndsvda":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif init == "nndsvdar":
rng = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'random', 'nndsvd', 'nndsvda', 'nndsvdar')))
return W, H
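# Hedged usage sketch: 'nndsvd' yields a deterministic, typically sparse
# non-negative starting point, while 'random' rescales Gaussian noise; both
# return factors with the shapes documented above.
def _demo_initialize_nmf():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))
    W, H = _initialize_nmf(X, n_components=3, init='nndsvd')
    assert W.shape == (6, 3) and H.shape == (3, 5)
    assert (W >= 0).all() and (H >= 0).all()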
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle,
random_state):
"""Helper function for _fit_coordinate_descent
Update W to minimize the objective function, iterating once over all
coordinates. By symmetry, to update H, one can call
_update_coordinate_descent(X.T, Ht, W, ...)
"""
n_components = Ht.shape[1]
HHt = np.dot(Ht.T, Ht)
XHt = safe_sparse_dot(X, Ht)
# L2 regularization corresponds to increase of the diagonal of HHt
if l2_reg != 0.:
# adds l2_reg only on the diagonal
HHt.flat[::n_components + 1] += l2_reg
# L1 regularization corresponds to decrease of each element of XHt
if l1_reg != 0.:
XHt -= l1_reg
if shuffle:
permutation = random_state.permutation(n_components)
else:
permutation = np.arange(n_components)
# The following seems to be required on 64-bit Windows w/ Python 3.5.
permutation = np.asarray(permutation, dtype=np.intp)
return _update_cdnmf_fast(W, HHt, XHt, permutation)
def _fit_coordinate_descent(X, W, H, tol=1e-4, max_iter=200, l1_reg_W=0,
l1_reg_H=0, l2_reg_W=0, l2_reg_H=0, update_H=True,
verbose=0, shuffle=False, random_state=None):
"""Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent
The objective function is minimized with an alternating minimization of W
and H. Each minimization is done with a cyclic (up to a permutation of the
features) Coordinate Descent.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
"""
# so W and Ht are both in C order in memory
Ht = check_array(H.T, order='C')
X = check_array(X, accept_sparse='csr')
rng = check_random_state(random_state)
for n_iter in range(max_iter):
violation = 0.
# Update W
violation += _update_coordinate_descent(X, W, Ht, l1_reg_W,
l2_reg_W, shuffle, rng)
# Update H
if update_H:
violation += _update_coordinate_descent(X.T, Ht, W, l1_reg_H,
l2_reg_H, shuffle, rng)
if n_iter == 0:
violation_init = violation
if violation_init == 0:
break
if verbose:
print("violation:", violation / violation_init)
if violation / violation_init <= tol:
if verbose:
print("Converged at iteration", n_iter + 1)
break
return W, Ht.T, n_iter
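# Illustrative driver (a sketch under the assumptions above, not library
# code): the CD solver takes pre-built initial factors and returns the
# refined pair together with the iteration at which the relative violation
# fell below tol.
def _demo_fit_coordinate_descent():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(8, 6))
    W, H = _initialize_nmf(X, 2, init='random', random_state=0)
    W, H, n_iter = _fit_coordinate_descent(X, W, H, tol=1e-4, random_state=0)
    return _beta_divergence(X, W, H, 2, square_root=True), n_iter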
def _multiplicative_update_w(X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum=None, HHt=None, XHt=None, update_H=True):
"""update W in Multiplicative Update NMF"""
if beta_loss == 2:
# Numerator
if XHt is None:
XHt = safe_sparse_dot(X, H.T)
if update_H:
# avoid a copy of XHt, which will be re-computed (update_H=True)
numerator = XHt
else:
# preserve the XHt, which is not re-computed (update_H=False)
numerator = XHt.copy()
# Denominator
if HHt is None:
HHt = np.dot(H, H.T)
denominator = np.dot(W, HHt)
else:
# Numerator
# if X is sparse, compute WH only where X is non zero
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid taking a negative power of zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
numerator = safe_sparse_dot(WH_safe_X, H.T)
# Denominator
if beta_loss == 1:
if H_sum is None:
H_sum = np.sum(H, axis=1) # shape(n_components, )
denominator = H_sum[np.newaxis, :]
else:
# computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T)
if sp.issparse(X):
# memory efficient computation
# (compute row by row, avoiding the dense matrix WH)
WHHt = np.empty(W.shape)
for i in range(X.shape[0]):
WHi = np.dot(W[i, :], H)
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WHHt[i, :] = np.dot(WHi, H.T)
else:
WH **= beta_loss - 1
WHHt = np.dot(WH, H.T)
denominator = WHHt
# Add L1 and L2 regularization
if l1_reg_W > 0:
denominator += l1_reg_W
if l2_reg_W > 0:
denominator = denominator + l2_reg_W * W
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_W = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_W **= gamma
return delta_W, H_sum, HHt, XHt
def _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma):
"""update H in Multiplicative Update NMF"""
if beta_loss == 2:
numerator = safe_sparse_dot(W.T, X)
denominator = np.dot(np.dot(W.T, W), H)
else:
# Numerator
WH_safe_X = _special_sparse_dot(W, H, X)
if sp.issparse(X):
WH_safe_X_data = WH_safe_X.data
X_data = X.data
else:
WH_safe_X_data = WH_safe_X
X_data = X
# copy used in the Denominator
WH = WH_safe_X.copy()
if beta_loss - 1. < 0:
WH[WH == 0] = EPSILON
# to avoid division by zero
if beta_loss - 2. < 0:
WH_safe_X_data[WH_safe_X_data == 0] = EPSILON
if beta_loss == 1:
np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
else:
WH_safe_X_data **= beta_loss - 2
# element-wise multiplication
WH_safe_X_data *= X_data
# here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
numerator = safe_sparse_dot(W.T, WH_safe_X)
# Denominator
if beta_loss == 1:
W_sum = np.sum(W, axis=0) # shape(n_components, )
W_sum[W_sum == 0] = 1.
denominator = W_sum[:, np.newaxis]
# beta_loss not in (1, 2)
else:
# computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1)
if sp.issparse(X):
# memory efficient computation
# (compute column by column, avoiding the dense matrix WH)
WtWH = np.empty(H.shape)
for i in range(X.shape[1]):
WHi = np.dot(W, H[:, i])
if beta_loss - 1 < 0:
WHi[WHi == 0] = EPSILON
WHi **= beta_loss - 1
WtWH[:, i] = np.dot(W.T, WHi)
else:
WH **= beta_loss - 1
WtWH = np.dot(W.T, WH)
denominator = WtWH
# Add L1 and L2 regularization
if l1_reg_H > 0:
denominator += l1_reg_H
if l2_reg_H > 0:
denominator = denominator + l2_reg_H * H
denominator[denominator == 0] = EPSILON
numerator /= denominator
delta_H = numerator
# gamma is in ]0, 1]
if gamma != 1:
delta_H **= gamma
return delta_H
def _fit_multiplicative_update(X, W, H, beta_loss='frobenius',
max_iter=200, tol=1e-4,
l1_reg_W=0, l1_reg_H=0, l2_reg_W=0, l2_reg_H=0,
update_H=True, verbose=0):
"""Compute Non-negative Matrix Factorization with Multiplicative Update
The objective function is _beta_divergence(X, WH) and is minimized with an
alternating minimization of W and H. Each minimization is done with a
Multiplicative Update.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant input matrix.
W : array-like, shape (n_samples, n_components)
Initial guess for the solution.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros.
max_iter : integer, default: 200
Number of iterations.
tol : float, default: 1e-4
Tolerance of the stopping condition.
l1_reg_W : double, default: 0.
L1 regularization parameter for W.
l1_reg_H : double, default: 0.
L1 regularization parameter for H.
l2_reg_W : double, default: 0.
L2 regularization parameter for W.
l2_reg_H : double, default: 0.
L2 regularization parameter for H.
update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.
verbose : integer, default: 0
The verbosity level.
Returns
-------
W : array, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
The number of iterations done by the algorithm.
References
----------
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
start_time = time.time()
beta_loss = _beta_loss_to_float(beta_loss)
# gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
if beta_loss < 1:
gamma = 1. / (2. - beta_loss)
elif beta_loss > 2:
gamma = 1. / (beta_loss - 1.)
else:
gamma = 1.
# used for the convergence criterion
error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
previous_error = error_at_init
H_sum, HHt, XHt = None, None, None
for n_iter in range(1, max_iter + 1):
# update W
# H_sum, HHt and XHt are saved and reused if not update_H
delta_W, H_sum, HHt, XHt = _multiplicative_update_w(
X, W, H, beta_loss, l1_reg_W, l2_reg_W, gamma,
H_sum, HHt, XHt, update_H)
W *= delta_W
# necessary for stability with beta_loss < 1
if beta_loss < 1:
W[W < np.finfo(np.float64).eps] = 0.
# update H
if update_H:
delta_H = _multiplicative_update_h(X, W, H, beta_loss, l1_reg_H,
l2_reg_H, gamma)
H *= delta_H
# These values will be recomputed since H changed
H_sum, HHt, XHt = None, None, None
# necessary for stability with beta_loss < 1
if beta_loss <= 1:
H[H < np.finfo(np.float64).eps] = 0.
# test convergence criterion every 10 iterations
if tol > 0 and n_iter % 10 == 0:
error = _beta_divergence(X, W, H, beta_loss, square_root=True)
if verbose:
iter_time = time.time()
print("Epoch %02d reached after %.3f seconds, error: %f" %
(n_iter, iter_time - start_time, error))
if (previous_error - error) / error_at_init < tol:
break
previous_error = error
# do not print if we have already printed in the convergence test
if verbose and (tol == 0 or n_iter % 10 != 0):
end_time = time.time()
print("Epoch %02d reached after %.3f seconds." %
(n_iter, end_time - start_time))
return W, H, n_iter
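# A minimal sketch (assumed usage, not called anywhere in this module):
# running the multiplicative-update loop above directly on a tiny
# non-negative problem, reusing _initialize_nmf for the starting W and H.
def _demo_fit_multiplicative_update():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))
    W, H = _initialize_nmf(X, n_components=2, init='random', random_state=0)
    W, H, n_iter = _fit_multiplicative_update(X, W, H,
                                              beta_loss='kullback-leibler',
                                              max_iter=100, tol=1e-4)
    return W, H, n_iter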
def non_negative_factorization(X, W=None, H=None, n_components=None,
init='random', update_H=True, solver='cd',
beta_loss='frobenius', tol=1e-4,
max_iter=200, alpha=0., l1_ratio=0.,
regularization=None, random_state=None,
verbose=0, shuffle=False):
"""Compute Non-negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
If update_H=False, it is used as a constant, to solve for W only.
n_components : integer
Number of components, if n_components is not set all features
are kept.
init : None | 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
    update_H : boolean, default: True
        If True, both W and H will be estimated from initial guesses.
        If False, only W will be estimated.
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
regularization : 'both' | 'components' | 'transformation' | None
Select whether the regularization affects the components (H), the
transformation (W), both or none of them.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : integer, default: 0
The verbosity level.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
Returns
-------
W : array-like, shape (n_samples, n_components)
Solution to the non-negative least squares problem.
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
n_iter : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import non_negative_factorization
>>> W, H, n_iter = non_negative_factorization(X, n_components=2, \
init='random', random_state=0)
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
check_non_negative(X, "NMF (input X)")
beta_loss = _check_string_param(solver, regularization, beta_loss, init)
if safe_min(X) == 0 and beta_loss <= 0:
raise ValueError("When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values to "
"X, or use a positive beta_loss.")
n_samples, n_features = X.shape
if n_components is None:
n_components = n_features
if not isinstance(n_components, INTEGER_TYPES) or n_components <= 0:
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
    if not isinstance(max_iter, INTEGER_TYPES) or max_iter <= 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % max_iter)
if not isinstance(tol, numbers.Number) or tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % tol)
# check W and H, or initialize them
if init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
# 'mu' solver should not be initialized by zeros
if solver == 'mu':
avg = np.sqrt(X.mean() / n_components)
W = avg * np.ones((n_samples, n_components))
else:
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=init,
random_state=random_state)
l1_reg_W, l1_reg_H, l2_reg_W, l2_reg_H = _compute_regularization(
alpha, l1_ratio, regularization)
if solver == 'cd':
W, H, n_iter = _fit_coordinate_descent(X, W, H, tol, max_iter,
l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H,
update_H=update_H,
verbose=verbose,
shuffle=shuffle,
random_state=random_state)
elif solver == 'mu':
W, H, n_iter = _fit_multiplicative_update(X, W, H, beta_loss, max_iter,
tol, l1_reg_W, l1_reg_H,
l2_reg_W, l2_reg_H, update_H,
verbose)
else:
raise ValueError("Invalid solver parameter '%s'." % solver)
if n_iter == max_iter and tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it to"
" improve convergence." % max_iter, ConvergenceWarning)
return W, H, n_iter
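# A short sketch (hypothetical values) of the 'mu' solver path documented
# above: beta_loss values other than 'frobenius' require solver='mu'.
def _demo_non_negative_factorization_mu():
    X = np.array([[1., 1.], [2., 1.], [3., 1.2],
                  [4., 1.], [5., 0.8], [6., 1.]])
    W, H, n_iter = non_negative_factorization(
        X, n_components=2, init='random', solver='mu',
        beta_loss='kullback-leibler', random_state=0)
    return W, H, n_iter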
class NMF(BaseEstimator, TransformerMixin):
"""Non-Negative Matrix Factorization (NMF)
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is::
0.5 * ||X - WH||_Fro^2
+ alpha * l1_ratio * ||vec(W)||_1
+ alpha * l1_ratio * ||vec(H)||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
+ 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2
Where::
||A||_Fro^2 = \sum_{i,j} A_{ij}^2 (Frobenius norm)
||vec(A)||_1 = \sum_{i,j} abs(A_{ij}) (Elementwise L1 norm)
For multiplicative-update ('mu') solver, the Frobenius norm
(0.5 * ||X - WH||_Fro^2) can be changed into another beta-divergence loss,
by changing the beta_loss parameter.
The objective function is minimized with an alternating minimization of W
and H.
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all features
are kept.
init : 'random' | 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'custom'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options:
- 'random': non-negative random matrices, scaled with:
sqrt(X.mean() / n_components)
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': use custom matrices W and H
solver : 'cd' | 'mu'
Numerical solver to use:
'cd' is a Coordinate Descent solver.
'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or string, default 'frobenius'
String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
matrix X cannot contain zeros. Used only in 'mu' solver.
.. versionadded:: 0.19
tol : float, default: 1e-4
Tolerance of the stopping condition.
max_iter : integer, default: 200
Maximum number of iterations before timing out.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
.. versionadded:: 0.17
*alpha* used in the Coordinate Descent solver.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an elementwise L2 penalty
(aka Frobenius Norm).
For l1_ratio = 1 it is an elementwise L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
.. versionadded:: 0.17
Regularization parameter *l1_ratio* used in the Coordinate Descent
solver.
shuffle : boolean, default: False
If true, randomize the order of coordinates in the CD solver.
.. versionadded:: 0.17
*shuffle* parameter used in the Coordinate Descent solver.
Attributes
----------
components_ : array, [n_components, n_features]
Factorization matrix, sometimes called 'dictionary'.
reconstruction_err_ : number
Frobenius norm of the matrix difference, or beta-divergence, between
the training data ``X`` and the reconstructed data ``WH`` from
the fitted model.
n_iter_ : int
Actual number of iterations.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import NMF
>>> model = NMF(n_components=2, init='random', random_state=0)
>>> W = model.fit_transform(X)
>>> H = model.components_
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
factorization with the beta-divergence. Neural Computation, 23(9).
"""
def __init__(self, n_components=None, init=None, solver='cd',
beta_loss='frobenius', tol=1e-4, max_iter=200,
random_state=None, alpha=0., l1_ratio=0., verbose=0,
shuffle=False):
self.n_components = n_components
self.init = init
self.solver = solver
self.beta_loss = beta_loss
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
self.verbose = verbose
self.shuffle = shuffle
def fit_transform(self, X, y=None, W=None, H=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be decomposed.
W : array-like, shape (n_samples, n_components)
If init='custom', it is used as initial guess for the solution.
H : array-like, shape (n_components, n_features)
If init='custom', it is used as initial guess for the solution.
Returns
-------
W : array, shape (n_samples, n_components)
Transformed data.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), dtype=float)
W, H, n_iter_ = non_negative_factorization(
X=X, W=W, H=H, n_components=self.n_components, init=self.init,
update_H=True, solver=self.solver, beta_loss=self.beta_loss,
tol=self.tol, max_iter=self.max_iter, alpha=self.alpha,
l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
self.reconstruction_err_ = _beta_divergence(X, W, H, self.beta_loss,
square_root=True)
self.n_components_ = H.shape[0]
self.components_ = H
self.n_iter_ = n_iter_
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be decomposed.
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Data matrix to be transformed by the model.
        Returns
        -------
        W : array, shape (n_samples, n_components)
            Transformed data.
"""
check_is_fitted(self, 'n_components_')
W, _, n_iter_ = non_negative_factorization(
X=X, W=None, H=self.components_, n_components=self.n_components_,
init=self.init, update_H=False, solver=self.solver,
beta_loss=self.beta_loss, tol=self.tol, max_iter=self.max_iter,
alpha=self.alpha, l1_ratio=self.l1_ratio, regularization='both',
random_state=self.random_state, verbose=self.verbose,
shuffle=self.shuffle)
return W
def inverse_transform(self, W):
"""Transform data back to its original space.
Parameters
----------
W : {array-like, sparse matrix}, shape (n_samples, n_components)
Transformed data matrix
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data matrix of original shape
.. versionadded:: 0.18
"""
check_is_fitted(self, 'n_components_')
return np.dot(W, self.components_)
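# A minimal round-trip sketch (assumed usage of the estimator above): fit,
# then map the transformed data back to the input space; inverse_transform
# is simply np.dot(W, components_).
def _demo_nmf_round_trip():
    X = np.array([[1., 1.], [2., 1.], [3., 1.2],
                  [4., 1.], [5., 0.8], [6., 1.]])
    model = NMF(n_components=2, init='random', random_state=0)
    W = model.fit_transform(X)
    X_approx = model.inverse_transform(W)
    return np.linalg.norm(X - X_approx)  # small reconstruction error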
| bsd-3-clause |
jorik041/scikit-learn | sklearn/linear_model/logistic.py | 105 | 56686 | """
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
    It takes into account whether the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
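# A tiny sketch (hypothetical numbers) of the convention handled above: when
# w has n_features + 1 entries, the trailing entry is the intercept c.
def _demo_intercept_dot():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([0.5, -0.25, 2.0])      # last entry is the intercept
    w_, c, yz = _intercept_dot(w, X, y)  # yz == y * (X.dot(w_) + c)
    return w_, c, yz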
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
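# A quick sketch (illustrative check, not part of the public API): comparing
# the analytic gradient above against a central finite difference of the
# loss returned by the same function.
def _demo_check_logistic_grad(eps=1e-6):
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)
    w = rng.randn(4)                         # last entry acts as intercept
    _, grad = _logistic_loss_and_grad(w, X, y, alpha=1.0)
    approx = np.empty_like(w)
    for i in range(w.size):
        e = np.zeros_like(w)
        e[i] = eps
        loss_plus, _ = _logistic_loss_and_grad(w + e, X, y, alpha=1.0)
        loss_minus, _ = _logistic_loss_and_grad(w - e, X, y, alpha=1.0)
        approx[i] = (loss_plus - loss_minus) / (2 * eps)
    return np.max(np.abs(grad - approx))     # expected to be tiny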
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
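# A small sketch (assumed shapes): evaluating the multinomial loss with
# labels binarized the same way logistic_regression_path does below. With
# w all zeros, p is the uniform distribution and each row sums to one.
def _demo_multinomial_loss():
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    Y = LabelBinarizer().fit_transform(np.array([0, 1, 2, 0, 1, 2, 0, 1]))
    w = np.zeros(Y.shape[1] * (X.shape[1] + 1))  # with intercept terms
    loss, p, w_ = _multinomial_loss(w, X, Y, alpha=1.0,
                                    sample_weight=np.ones(X.shape[0]))
    return loss, p.sum(axis=1)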
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
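# A brief sketch (assumed usage): the hessp closure returned above maps a
# ravelled vector to the Hessian-vector product, in the gradient's layout.
def _demo_multinomial_hessp():
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    Y = LabelBinarizer().fit_transform(np.array([0, 1, 2, 0, 1, 2, 0, 1]))
    w = np.zeros(Y.shape[1] * (X.shape[1] + 1))
    grad, hessp = _multinomial_grad_hess(w, X, Y, 1.0, np.ones(X.shape[0]))
    return hessp(np.ones_like(w)).shape  # matches the ravelled grad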
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
        Whether or not to produce a copy of the data. Setting this to
        True is useful when logistic_regression_path is called
        repeatedly with the same data, as y is modified along the path.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
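# A compact sketch (hypothetical data) of the warm-started path above: one
# binary problem solved for several C values in a single call.
def _demo_logistic_regression_path():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = (X[:, 0] + 0.1 * rng.randn(30) > 0).astype(int)
    coefs, Cs = logistic_regression_path(X, y, Cs=5, fit_intercept=True,
                                         solver='lbfgs')
    return len(coefs), Cs  # one coefficient vector per C on the grid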
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
        Whether or not to produce a copy of the data. Setting this to
        True is useful when ``_log_reg_scoring_path`` is called
        repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
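# A minimal sketch (hypothetical split): scoring the path on one fold with
# the default accuracy scorer, as LogisticRegressionCV does per fold below.
def _demo_log_reg_scoring_path():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 3)
    y = (X[:, 0] > 0).astype(int)
    train, test = np.arange(30), np.arange(30, 40)
    coefs, Cs, scores = _log_reg_scoring_path(X, y, train, test, pos_class=1,
                                              Cs=3, fit_intercept=True)
    return scores  # one accuracy per value of C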
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
    scheme if the 'multi_class' option is set to 'ovr', and the
    cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
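# A small usage sketch (assumed data): probability estimates from the fitted
# model above are ordered by self.classes_, and predict_log_proba returns
# their logarithms.
def _demo_logistic_regression_proba():
    X = np.array([[-2., -1.], [-1., -1.], [1., 1.], [2., 1.]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression(C=1.0).fit(X, y)
    return clf.classes_, clf.predict_proba(X)[:, 1]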
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. we guess the initial coefficients of
    the present fit to be the coefficients obtained after convergence in the
    previous fit, so it is supposed to be faster for high-dimensional dense
    data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across
    all folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
        See the :mod:`sklearn.cross_validation` module for the
        list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
        `coef_` is a read-only property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
        an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial', then the same scores are repeated across
        all classes, since the loss minimised is the multinomial loss.
        Each dict value has shape (n_folds, len(Cs)).
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
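# (For clarity: with multi_class='multinomial' a single path is fit
# jointly over all classes, so the loop below runs exactly once with
# pos_class=None.)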
if self.class_weight and not (isinstance(self.class_weight, dict) or
self.class_weight in ['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict, 'balanced', or 'auto'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features;
# we need it to be n_classes X n_folds X len(Cs) X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
cbertinato/pandas | pandas/tests/dtypes/test_inference.py | 1 | 49345 | """
These test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
import collections
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from fractions import Fraction
from io import StringIO
from numbers import Number
import re
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, lib, missing as libmissing
import pandas.util._test_decorators as td
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
ensure_categorical, ensure_int32, is_bool, is_datetime64_any_dtype,
is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,
is_float, is_integer, is_number, is_scalar, is_scipy_sparse,
is_timedelta64_dtype, is_timedelta64_ns_dtype)
import pandas as pd
from pandas import (
Categorical, DataFrame, DateOffset, DatetimeIndex, Index, Interval, Period,
Series, Timedelta, TimedeltaIndex, Timestamp, isna)
from pandas.util import testing as tm
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(np.int64))
class A:
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1]), collections.defaultdict()])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize("ll", [
'1', 1, [1, 2], (1, 2), range(2), Index([1]),
dict, collections.defaultdict, Series
])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike:
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
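# (The expectation above encodes the duck-typing rule exercised here:
# an object counts as dict-like only if it exposes keys(), __getitem__
# and __contains__ together; dropping any one of them fails the check.)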
def test_is_file_like():
class MockFile:
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass:
pass
class UnhashableClass1:
__hash__ = None
class UnhashableClass2:
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.abc.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', 'x',
r'asdf',
re.compile('adsf'),
r'\u2233\s*',
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference:
def test_infer_dtype_bytes(self):
compare = 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr, skipna=True) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr, skipna=True) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handling non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
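# (Illustration, assuming plain NumPy casting rules: mixing
# np.uint64 and np.int64 in one array upcasts to float64, e.g.
# np.array([np.uint64(2 ** 63), np.int64(-1)]).dtype == np.float64,
# which silently loses integer precision.)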
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference:
# Dummy class used for testing with Python objects
class Dummy:
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
@pytest.mark.parametrize('skipna', [True, False])
def test_length_zero(self, skipna):
result = lib.infer_dtype(np.array([], dtype='i4'), skipna=skipna)
assert result == 'integer'
result = lib.infer_dtype([], skipna=skipna)
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'integer'
def test_deprecation(self):
# GH 24050
arr = np.array([1, 2, 3], dtype=object)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = lib.infer_dtype(arr) # default: skipna=None -> warn
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'decimal'
# complex is compatible with nan, so skipna has no effect
@pytest.mark.parametrize('skipna', [True, False])
def test_complex(self, skipna):
# gets cast to complex on array construction
arr = np.array([1.0, 2.0, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
arr = np.array([1.0, 2.0, 1 + 1j], dtype='O')
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'mixed'
# gets cast to complex on array construction
arr = np.array([1, np.nan, 1 + 1j])
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
arr = np.array([1.0, np.nan, 1 + 1j], dtype='O')
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'mixed'
# complex with nans stays complex
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype='O')
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
# test smaller complex dtype; will pass through _try_infer_map fastpath
arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == 'complex'
def test_string(self):
pass
def test_unicode(self):
arr = ['a', np.nan, 'c']
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
arr = ['a', np.nan, 'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr, skipna=True) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr, skipna=True) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr, skipna=True) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr, skipna=True) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr, skipna=True) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == 'floating'
# a mix of nan and None results in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr, skipna=True) == 'empty'
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr, skipna=True) == 'empty'
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr, skipna=False) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr, skipna=False) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr, skipna=False) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates, skipna=False)
assert result == 'mixed'
result = lib.infer_dtype(dates, skipna=True)
assert result == 'date'
def test_is_numeric_array(self):
assert lib.is_float_array(np.array([1, 2.0]))
assert lib.is_float_array(np.array([1, 2.0, np.nan]))
assert not lib.is_float_array(np.array([1, 2]))
assert lib.is_integer_array(np.array([1, 2]))
assert not lib.is_integer_array(np.array([1, 2.0]))
def test_is_string_array(self):
assert lib.is_string_array(np.array(['foo', 'bar']))
assert not lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=False)
assert lib.is_string_array(
np.array(['foo', 'bar', np.nan], dtype=object), skipna=True)
assert not lib.is_string_array(np.array([1, 2]))
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr, skipna=False)
assert result == 'mixed'
result = lib.infer_dtype(arr, skipna=True)
assert result == 'empty'
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_is_period(self):
assert lib.is_period(pd.Period('2011-01', freq='M'))
assert not lib.is_period(pd.PeriodIndex(['2011-01'], freq='M'))
assert not lib.is_period(pd.Timestamp('2011-01'))
assert not lib.is_period(1)
assert not lib.is_period(np.nan)
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr, skipna=True)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == 'categorical'
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr, skipna=True)
assert result == 'categorical'
result = lib.infer_dtype(Series(arr), skipna=True)
assert result == 'categorical'
class TestNumberScalar:
def test_is_number(self):
assert is_number(True)
assert is_number(1)
assert is_number(1.1)
assert is_number(1 + 3j)
assert is_number(np.bool(False))
assert is_number(np.int64(1))
assert is_number(np.float64(1.1))
assert is_number(np.complex128(1 + 3j))
assert is_number(np.nan)
assert not is_number(None)
assert not is_number('x')
assert not is_number(datetime(2011, 1, 1))
assert not is_number(np.datetime64('2011-01-01'))
assert not is_number(Timestamp('2011-01-01'))
assert not is_number(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_number(timedelta(1000))
assert not is_number(Timedelta('1 days'))
# questionable
assert not is_number(np.bool_(False))
assert is_number(np.timedelta64(1, 'D'))
def test_is_bool(self):
assert is_bool(True)
assert is_bool(np.bool(False))
assert is_bool(np.bool_(False))
assert not is_bool(1)
assert not is_bool(1.1)
assert not is_bool(1 + 3j)
assert not is_bool(np.int64(1))
assert not is_bool(np.float64(1.1))
assert not is_bool(np.complex128(1 + 3j))
assert not is_bool(np.nan)
assert not is_bool(None)
assert not is_bool('x')
assert not is_bool(datetime(2011, 1, 1))
assert not is_bool(np.datetime64('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01'))
assert not is_bool(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_bool(timedelta(1000))
assert not is_bool(np.timedelta64(1, 'D'))
assert not is_bool(Timedelta('1 days'))
def test_is_integer(self):
assert is_integer(1)
assert is_integer(np.int64(1))
assert not is_integer(True)
assert not is_integer(1.1)
assert not is_integer(1 + 3j)
assert not is_integer(np.bool(False))
assert not is_integer(np.bool_(False))
assert not is_integer(np.float64(1.1))
assert not is_integer(np.complex128(1 + 3j))
assert not is_integer(np.nan)
assert not is_integer(None)
assert not is_integer('x')
assert not is_integer(datetime(2011, 1, 1))
assert not is_integer(np.datetime64('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01'))
assert not is_integer(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_integer(timedelta(1000))
assert not is_integer(Timedelta('1 days'))
# questionable
assert is_integer(np.timedelta64(1, 'D'))
def test_is_float(self):
assert is_float(1.1)
assert is_float(np.float64(1.1))
assert is_float(np.nan)
assert not is_float(True)
assert not is_float(1)
assert not is_float(1 + 3j)
assert not is_float(np.bool(False))
assert not is_float(np.bool_(False))
assert not is_float(np.int64(1))
assert not is_float(np.complex128(1 + 3j))
assert not is_float(None)
assert not is_float('x')
assert not is_float(datetime(2011, 1, 1))
assert not is_float(np.datetime64('2011-01-01'))
assert not is_float(Timestamp('2011-01-01'))
assert not is_float(Timestamp('2011-01-01', tz='US/Eastern'))
assert not is_float(timedelta(1000))
assert not is_float(np.timedelta64(1, 'D'))
assert not is_float(Timedelta('1 days'))
def test_is_datetime_dtypes(self):
ts = pd.date_range('20130101', periods=3)
tsa = pd.date_range('20130101', periods=3, tz='US/Eastern')
assert is_datetime64_dtype('datetime64')
assert is_datetime64_dtype('datetime64[ns]')
assert is_datetime64_dtype(ts)
assert not is_datetime64_dtype(tsa)
assert not is_datetime64_ns_dtype('datetime64')
assert is_datetime64_ns_dtype('datetime64[ns]')
assert is_datetime64_ns_dtype(ts)
assert is_datetime64_ns_dtype(tsa)
assert is_datetime64_any_dtype('datetime64')
assert is_datetime64_any_dtype('datetime64[ns]')
assert is_datetime64_any_dtype(ts)
assert is_datetime64_any_dtype(tsa)
assert not is_datetime64tz_dtype('datetime64')
assert not is_datetime64tz_dtype('datetime64[ns]')
assert not is_datetime64tz_dtype(ts)
assert is_datetime64tz_dtype(tsa)
for tz in ['US/Eastern', 'UTC']:
dtype = 'datetime64[ns, {}]'.format(tz)
assert not is_datetime64_dtype(dtype)
assert is_datetime64tz_dtype(dtype)
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_any_dtype(dtype)
def test_is_timedelta(self):
assert is_timedelta64_dtype('timedelta64')
assert is_timedelta64_dtype('timedelta64[ns]')
assert not is_timedelta64_ns_dtype('timedelta64')
assert is_timedelta64_ns_dtype('timedelta64[ns]')
tdi = TimedeltaIndex([1e14, 2e14], dtype='timedelta64[ns]')
assert is_timedelta64_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi)
assert is_timedelta64_ns_dtype(tdi.astype('timedelta64[ns]'))
# Conversion to Int64Index:
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64'))
assert not is_timedelta64_ns_dtype(tdi.astype('timedelta64[h]'))
class TestIsScalar:
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Number())
assert is_scalar(Fraction())
assert is_scalar(0.)
assert is_scalar(np.nan)
assert is_scalar('foobar')
assert is_scalar(b'foobar')
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1, ))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.))
assert is_scalar(np.int32(1))
assert is_scalar(np.object_('foobar'))
assert is_scalar(np.str_('foobar'))
assert is_scalar(np.unicode_('foobar'))
assert is_scalar(np.bytes_(b'foobar'))
assert is_scalar(np.datetime64('2014-01-01'))
assert is_scalar(np.timedelta64(1, 'h'))
def test_is_scalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h')),
np.array(np.datetime64('NaT'))]:
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scalar_numpy_arrays(self):
assert not is_scalar(np.array([]))
assert not is_scalar(np.array([[]]))
assert not is_scalar(np.matrix('1; 2'))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp('2014-01-01'))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period('2014-01-01'))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series())
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
def test_datetimeindex_from_empty_datetime64_array():
for unit in ['ms', 'us', 'ns']:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert (len(idx) == 0)
def test_nan_to_nat_conversions():
df = DataFrame(dict({
'A': np.asarray(range(10), dtype='float64'),
'B': Timestamp('20010101')
}))
df.iloc[3:6, :] = np.nan
result = df.loc[4, 'B'].value
assert (result == iNaT)
s = df['B'].copy()
s._data = s._data.setitem(indexer=tuple([slice(8, 9)]), value=np.nan)
assert (isna(s[8]))
assert (s[8].value == np.datetime64('NaT').astype(np.int64))
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix): # noqa: F811
assert is_scipy_sparse(spmatrix([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = ensure_int32(values)
assert (result.dtype == np.int32)
values = np.arange(10, dtype=np.int64)
result = ensure_int32(values)
assert (result.dtype == np.int32)
def test_ensure_categorical():
values = np.arange(10, dtype=np.int32)
result = ensure_categorical(values)
assert (result.dtype == 'category')
values = Categorical(values)
result = ensure_categorical(values)
tm.assert_categorical_equal(result, values)
| bsd-3-clause |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/artist.py | 69 | 33042 | from __future__ import division
import re, warnings
import matplotlib
import matplotlib.cbook as cbook
from transforms import Bbox, IdentityTransform, TransformedBbox, TransformedPath
from path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
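# For reference, a setter following this convention might look like the
# sketch below (the name ``set_somewidth`` is illustrative, not a real
# Artist method):
#
#     def set_somewidth(self, w):
#         """
#         Set the width in points
#         ACCEPTS: float value in points
#         """
#         self._somewidth = w
#         self.pchanged()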
class Artist(object):
"""
Abstract base class for objects that render into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = 1.0
self.clipbox = None
self._clippath = None
self._clipon = True
self._lod = False
self._label = ''
self._picker = None
self._contains = None
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self.axes = None
self._remove_method = None
self._url = None
self.x_isdata = True # False to avoid updating Axes.dataLim with x
self.y_isdata = True # with y
self._snap = None
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should set
# the _remove_method attribute directly. This would be a protected
# attribute if Python supported that sort of thing. The callback
# has one parameter, which is the child to be removed.
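# For example (sketch, not actual library code), a container could set:
#     child._remove_method = lambda child: container_children.remove(child)
# where ``container_children`` is the hypothetical list holding the child.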
if self._remove_method is not None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property
# of whether or not the artist should affect the limits. Then there
# will be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None: return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
import traceback
L = []
try:
hascursor,info = self.contains(event)
if hascursor:
L.append(self)
except:
traceback.print_exc()
print "while checking",self.__class__
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains): return self._contains(self,mouseevent)
#raise NotImplementedError,str(self.__class__)+" needs 'contains' method"
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False,{}
def set_contains(self,picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside,prop = picker(self,mouseevent)
else:
inside,prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g. the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. If the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
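# A minimal, illustrative use of the picker machinery (``ln`` and
# ``fig`` are hypothetical, assuming a line artist on a connected
# canvas):
#     ln.set_picker(5.0)  # fire pick events within 5 points of the data
#     def on_pick(event):
#         print event.artist  # Python 2 print, matching this module
#     fig.canvas.mpl_connect('pick_event', on_pick)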
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
"""
self._url = url
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
return self._snap
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
self._snap = snap
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
if not success:
print type(path), type(transform)
raise TypeError("Invalid arguments to set_clip_path")
self.pchanged()
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible(): return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
        dictionary *props*.
"""
store = self.eventson
self.eventson = False
changed = False
for k,v in props.items():
func = getattr(self, 'set_'+k, None)
if func is None or not callable(func):
raise AttributeError('Unknown property %s'%k)
func(v)
changed = True
self.eventson = store
if changed: self.pchanged()
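    # Illustrative usage (not part of the original source): update() maps each
    # dictionary key 'name' to the setter set_name, so
    #     artist.update({'alpha': 0.5, 'zorder': 3})
    # calls set_alpha(0.5) and set_zorder(3); an unknown key raises
    # AttributeError.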
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self.pchanged()
def set(self, **kwargs):
"""
A tkstyle set command, pass *kwargs* to set properties
"""
ret = []
for k,v in kwargs.items():
k = k.lower()
funcName = "set_%s"%k
func = getattr(self,funcName)
ret.extend( [func(v)] )
return ret
def findobj(self, match=None):
"""
pyplot signature:
findobj(o=gcf(), match=None)
Recursively find all :class:matplotlib.artist.Artist instances
contained in self.
*match* can be
- None: return all objects contained in artist (including artist)
- function with signature ``boolean = match(artist)`` used to filter matches
- class instance: eg Line2D. Only return artists of class type
.. plot:: mpl_examples/pylab_examples/findobj_demo.py
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
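# A minimal sketch (not part of the original module) of the three *match*
# forms findobj accepts; `ax` is assumed to be a live Axes instance, and the
# Line2D import is local so the helper stays self-contained.
def _findobj_example(ax):
    from matplotlib.lines import Line2D
    everything = ax.findobj()           # match=None: all artists, including ax
    lines = ax.findobj(Line2D)          # class: only Line2D instances
    faint = ax.findobj(lambda a: (a.get_alpha() or 1.0) < 0.5)  # callable filter
    return everything, lines, faint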
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
and return information about it's settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of
:class:`Artists`. If a sequence is used, we assume it is a
homogeneous sequence (all :class:`Artists` are of the same
type) and it is your responsibility to make sure this is so.
"""
if cbook.iterable(o) and len(o): o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
Eg., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and callable(getattr(self.o,name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func): continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
Eg., for a line linestyle, return
[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
"""
name = 'set_%s'%attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s'%(self.o,name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None: return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
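    # Illustrative example (not in the original source): Artist.set_visible's
    # docstring contains "ACCEPTS: [True | False]", so
    #     ArtistInspector(Artist).get_valid_values('visible')
    # returns the string '[True | False]'.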
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'): continue
o = getattr(self.o, name)
if not callable(o): continue
func = o
if self.is_alias(func): continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. Eg., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None: return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target) for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '='*col0_len + ' ' + '='*col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len+3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len+3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and callable(getattr(o, name))]
#print getters
getters.sort()
lines = []
for name in getters:
func = getattr(o, name)
if self.is_alias(func): continue
try: val = func()
except: continue
if getattr(val, 'shape', ()) != () and len(val)>6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s)>50:
s = s[:50] + '...'
name = self.aliased_name(name[4:])
lines.append(' %s = %s' %(name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: eg :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x): return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
            matchfunc = match
else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(o, property=None):
"""
Return the value of handle property. property is an optional string
for the property you want to return
Example usage::
getp(o) # get all the object properties
getp(o, 'linestyle') # get the linestyle property
*o* is a :class:`Artist` instance, eg
    :class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
o.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(o)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
insp = ArtistInspector(o)
if property is None:
ret = insp.pprint_getters()
print '\n'.join(ret)
return
func = getattr(o, 'get_' + property)
return func()
# alias
get = getp
def setp(h, *args, **kwargs):
"""
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. E.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the matlab(TM) style string/value pairs or
with python kwargs. For example, the following are equivalent::
    >>> setp(lines, 'linewidth', 2, 'color', 'r') # matlab style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(h)
if len(kwargs)==0 and len(args)==0:
print '\n'.join(insp.pprint_setters())
return
if len(kwargs)==0 and len(args)==1:
print insp.pprint_setters(prop=args[0])
return
if not cbook.iterable(h): h = [h]
else: h = cbook.flatten(h)
if len(args)%2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args)-1, 2):
funcvals.append((args[i], args[i+1]))
funcvals.extend(kwargs.items())
ret = []
for o in h:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s"%s
func = getattr(o,funcName)
ret.extend( [func(val)] )
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
kwdocd = dict()
kwdocd['Artist'] = kwdoc(Artist)
| gpl-3.0 |
cmorgan/pysystemtrade | syscore/accounting.py | 1 | 56500 | """
Suite of things to work out p&l, and statistics thereof
"""
from copy import copy, deepcopy
import pandas as pd
from pandas.tseries.offsets import BDay
import numpy as np
from scipy.stats import skew, ttest_rel, ttest_1samp
import scipy.stats as stats
import random
from syscore.algos import robust_vol_calc
from syscore.pdutils import drawdown
from syscore.dateutils import BUSINESS_DAYS_IN_YEAR, ROOT_BDAYS_INYEAR, WEEKS_IN_YEAR, ROOT_WEEKS_IN_YEAR
from syscore.dateutils import MONTHS_IN_YEAR, ROOT_MONTHS_IN_YEAR
"""
some defaults
"""
DEFAULT_CAPITAL = 10000000.0
DEFAULT_ANN_RISK_TARGET = 0.16
DEFAULT_DAILY_CAPITAL = (
DEFAULT_CAPITAL * DEFAULT_ANN_RISK_TARGET / ROOT_BDAYS_INYEAR)
def account_test(ac1, ac2):
"""
Given two Account like objects performs a two sided t test of normalised returns
:param ac1: first set of returns
:type ac1: accountCurve or pd.DataFrame of returns
:param ac2: second set of returns
:type ac2: accountCurve or pd.DataFrame of returns
:returns: 2 tuple: difference in means, t-test results
"""
common_ts = sorted(set(list(ac1.index)) & set(list(ac2.index)))
ac1_common = ac1.cumsum().reindex(common_ts, method="ffill").diff().values
ac2_common = ac2.cumsum().reindex(common_ts, method="ffill").diff().values
missing_values = [
idx for idx in range(len(common_ts))
if (np.isnan(ac1_common[idx]) or np.isnan(ac2_common[idx]))
]
ac1_common = [
ac1_common[idx] for idx in range(len(common_ts))
if idx not in missing_values
]
ac2_common = [
ac2_common[idx] for idx in range(len(common_ts))
if idx not in missing_values
]
ac1_common = ac1_common / np.nanstd(ac1_common)
ac2_common = ac2_common / np.nanstd(ac2_common)
diff = np.mean(ac1_common) - np.mean(ac2_common)
return (diff, ttest_rel(ac1_common, ac2_common))
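# A minimal usage sketch (not part of the original module); `acc1` and `acc2`
# are assumed to be account curves or pd.Series of returns with overlapping
# date indices. scipy's ttest_rel returns (statistic, pvalue).
def _account_test_example(acc1, acc2):
    diff, ttest = account_test(acc1, acc2)
    return diff, ttest[1]  # difference in normalised means and the p value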
def pandl_with_data(price,
trades=None,
marktomarket=True,
positions=None,
delayfill=True,
roundpositions=False,
get_daily_returns_volatility=None,
forecast=None,
fx=None,
daily_risk_capital=None,
value_of_price_point=1.0):
"""
Calculate pandl for an individual position
If marktomarket=True, and trades is provided, calculate pandl both at
open/close and mark to market in between
If trades is not provided, work out using positions. If delayfill is True,
assume we get filled at the next price after the trade
If roundpositions is True when working out trades from positions,
then round; otherwise assume we trade fractional lots
If positions are not provided, work out position using forecast and
volatility (this will be for an arbitrary daily risk target)
If volatility is not provided, work out from price
If fx is not provided, assume fx rate is 1.0 and work out p&l in currency
of instrument
If value_of_price_point is not provided, assume is 1.0 (block size is value
of 1 price point, eg 100 if you're buying 100 shares for one instrument
block)
:param price: price series
:type price: Tx1 pd.Series
:param trades: set of trades done NOT always aligned to price
:type trades: Tx2 pd.DataFrame columns ['trades', 'fill_price'] or None
:param marktomarket: If trades provided: Should we mark to market, or just
use traded prices?
:type marktomarket: bool
:param positions: series of positions NOT ALWAYS aligned to price
:type positions: Tx1 pd.Series or None
:param delayfill: If no trades provided: should we delay fills?
:type delayfill: bool
:param roundpositions: If no trades provided, should we round positions
when calculating trades?
:type roundpositions: bool
:param get_daily_returns_volatility: series of volatility estimates, used
for calculation of positions aligned to price
:type get_daily_returns_volatility: Tx1 pd.Series or None
:param forecast: series of forecasts, needed to work out positions if missing
:type forecast: Tx1 pd.Series or None
    :param daily_risk_capital: needed to work out positions from forecasts. If a time series
must be aligned to price
:type daily_risk_capital: Tx1 pd.Series or None or float
:param fx: series of fx rates from instrument currency to base currency, to
work out p&l in base currency aligned to price
:type fx: Tx1 pd.Series or None
:param value_of_price_point: value of one unit movement in price
:type value_of_price_point: float
    :returns: 6-tuple (cum_trades, trades_to_use, instr_ccy_returns,
        base_ccy_returns, use_fx, value_of_price_point)
"""
if price is None:
raise Exception("Can't work p&l without price")
if fx is None:
# assume it's 1.0
use_fx = pd.Series([1.0] * len(price.index), index=price.index)
else:
use_fx = fx.reindex(price.index, method="ffill")
if trades is None:
prices_to_use = copy(price)
if positions is None:
positions = get_positions_from_forecasts(
price, get_daily_returns_volatility, forecast, use_fx,
value_of_price_point, daily_risk_capital)
if roundpositions:
use_positions = positions.round()
else:
use_positions = copy(positions)
if delayfill:
use_positions = use_positions.shift(1)
cum_trades = use_positions.ffill()
trades_to_use = cum_trades.diff()
else:
# have some trades data
if marktomarket:
# want to have both kinds of price
prices_to_use = pd.concat(
[price, trades.fill_price], axis=1, join='outer')
# Where no fill price available, use price
prices_to_use = prices_to_use.fillna(axis=1, method="ffill")
prices_to_use = prices_to_use.fill_price
            # align trades
trades_to_use = trades.reindex(
prices_to_use.index, fill_value=0.0).trades
else:
# only calculate p&l on trades, using fills
trades_to_use = trades.trades
prices_to_use = trades.fill_price.ffill()
cum_trades = trades_to_use.cumsum().ffill()
price_returns = prices_to_use.ffill().diff()
instr_ccy_returns = cum_trades.shift(
1) * price_returns * value_of_price_point
instr_ccy_returns = instr_ccy_returns.cumsum().ffill().reindex(
price.index).diff()
base_ccy_returns = instr_ccy_returns * use_fx
return (cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns,
use_fx, value_of_price_point)
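# A minimal usage sketch (not part of the original module); `price` and
# `positions` are assumed to be pd.Series on a business-day index.
def _pandl_example(price, positions):
    (cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns, use_fx,
     value_of_price_point) = pandl_with_data(
         price, positions=positions, delayfill=True, roundpositions=True)
    return base_ccy_returns.cumsum()  # cumulated p&l in base currency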
def get_positions_from_forecasts(price, get_daily_returns_volatility, forecast,
use_fx, value_of_price_point,
daily_risk_capital, **kwargs):
"""
Work out position using forecast, volatility, use_fx, value_of_price_point
(this will be for an arbitrary daily risk target)
If volatility is not provided, work out from price (uses a standard method
so may differ from precise system p&l)
:param price: price series
:type price: Tx1 pd.Series
:param get_daily_returns_volatility: series of volatility estimates. NOT %
volatility, price difference vol ALWAYS matched to price
:type get_daily_returns_volatility: Tx1 pd.Series or None
:param forecast: series of forecasts, needed to work out positions, MATCHED to price
:type forecast: Tx1 pd.Series
:param use_fx: series of fx rates from instrument currency to base currency, to
work out p&l in base currency, MATCHED to price
:type use_fx: Tx1 pd.Series
:param value_of_price_point: value of one unit movement in price
:type value_of_price_point: float
:param daily_risk_capital: Capital at risk
    :type daily_risk_capital: float or None or pd.Series aligned to forecast
**kwargs: passed to vol calculation
:returns: Tx1 pd dataframe of positions
"""
if forecast is None:
raise Exception(
"If you don't provide a series of trades or positions, I need a "
"forecast")
if get_daily_returns_volatility is None:
get_daily_returns_volatility = robust_vol_calc(price.diff(), **kwargs)
"""
Herein the proof why this position calculation is correct (see chapters
5-11 of 'systematic trading' book)
Position = forecast x instrument weight x instrument_div_mult x vol_scalar / 10.0
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x instr value volatility)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x instr ccy volatility x fx rate)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x block value x % price volatility x fx rate)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x underlying price x 0.01 x value of price move x 100 x price change volatility/(underlying price) x fx rate)
= forecast x instrument weight x instrument_div_mult x daily cash vol target / (10.0 x value of price move x price change volatility x fx rate)
Making some arbitrary assumptions (one instrument, 100% of capital, daily target DAILY_CAPITAL):
= forecast x 1.0 x 1.0 x DAILY_CAPITAL / (10.0 x value of price move x price diff volatility x fx rate)
= forecast x multiplier / (value of price move x price change volatility x fx rate)
"""
if daily_risk_capital is None:
daily_risk_capital = DEFAULT_DAILY_CAPITAL
multiplier = daily_risk_capital * 1.0 * 1.0 / 10.0
denominator = (
value_of_price_point * get_daily_returns_volatility * use_fx)
numerator = forecast * multiplier
positions = numerator.ffill() / denominator.ffill()
return positions
def percent(accurve):
"""
Takes any account curve object
Returns accountCurveSingleElementOneFreq - anything else is lost
"""
pass
class accountCurveSingleElementOneFreq(pd.Series):
"""
A single account curve for one asset (instrument / trading rule variation, ...)
and one part of it (gross, net, costs)
and for one frequency (daily, weekly, monthly...)
Inherits from series
We never init these directly but only as part of accountCurveSingleElement
"""
def __init__(self, returns_df, capital, weighted_flag=False,
frequency="D"):
"""
:param returns_df: series of returns
:type returns_df: Tx1 pd.Series
:param weighted_flag: Does this curve have weighted returns?
        :type weighted_flag: bool
:param frequency: Frequency D days, W weeks, M months, Y years
:type frequency: str
:param capital: used to calculate extrapolated and % curves
:type capital: float or pd.Series
"""
super().__init__(returns_df)
try:
returns_scalar = dict(
D=BUSINESS_DAYS_IN_YEAR,
W=WEEKS_IN_YEAR,
M=MONTHS_IN_YEAR,
Y=1)[frequency]
vol_scalar = dict(
D=ROOT_BDAYS_INYEAR,
W=ROOT_WEEKS_IN_YEAR,
M=ROOT_MONTHS_IN_YEAR,
Y=1)[frequency]
except KeyError:
raise Exception("Not a frequency %s" % frequency)
setattr(self, "frequency", frequency)
setattr(self, "_returns_scalar", returns_scalar)
setattr(self, "_vol_scalar", vol_scalar)
setattr(self, "_returns_df", returns_df)
setattr(self, "weighted_flag", weighted_flag)
setattr(self, "capital", capital)
def as_df(self):
print("Deprecated accountCurve.as_df use .as_ts() please")
# backward compatibility
return self.as_ts()
def as_ts(self):
return pd.Series(self._returns_df)
def percent(self):
perc_returns = self.as_percent()
new_curve = accountCurveSingleElementOneFreq(
perc_returns, 100.0, self.weighted_flag, self.frequency)
return new_curve
def cumulative(self):
cum_returns = self.as_cumulative()
new_curve = accountCurveSingleElementOneFreq(
cum_returns, self.capital, self.weighted_flag, self.frequency)
return new_curve
def as_percent(self):
return 100.0 * self.as_ts() / self.capital
def as_cumulative(self):
if isinstance(self.capital, pd.core.series.Series):
print(
"You shouldn't cumulate returns when capital is varying. Using the first value of capital only"
)
use_capital = self.capital[0]
else:
use_capital = self.capital
perc_ac_returns = self.as_percent() / 100.0
cum_returns = (1.0 + perc_ac_returns).cumprod()
cum_returns = cum_returns * use_capital
return cum_returns.diff()
def curve(self):
# we cache this since it's used so much
if hasattr(self, "_curve"):
return self._curve
else:
curve = self.cumsum().ffill()
setattr(self, "_curve", curve)
return curve
def mean(self):
return float(self.as_ts().mean())
def std(self):
return float(self.as_ts().std())
def ann_mean(self):
avg = self.mean()
return avg * self._returns_scalar
def ann_std(self):
period_std = self.std()
return period_std * self._vol_scalar
def sharpe(self):
mean_return = self.ann_mean()
vol = self.ann_std()
try:
sharpe = mean_return / vol
except ZeroDivisionError:
sharpe = np.nan
return sharpe
def drawdown(self):
x = self.curve()
return drawdown(x)
def avg_drawdown(self):
dd = self.drawdown()
return np.nanmean(dd.values)
def worst_drawdown(self):
dd = self.drawdown()
return np.nanmin(dd.values)
def time_in_drawdown(self):
dd = self.drawdown()
dd = [z for z in dd.values if not np.isnan(z)]
in_dd = float(len([z for z in dd if z < 0]))
return in_dd / float(len(dd))
def calmar(self):
return self.ann_mean() / -self.worst_drawdown()
def avg_return_to_drawdown(self):
return self.ann_mean() / -self.avg_drawdown()
def sortino(self):
period_stddev = np.std(self.losses())
ann_stdev = period_stddev * self._vol_scalar
ann_mean = self.ann_mean()
try:
sortino = ann_mean / ann_stdev
except ZeroDivisionError:
sortino = np.nan
return sortino
def vals(self):
x = [z for z in self.values if not np.isnan(z)]
return x
def min(self):
return np.nanmin(self.vals())
def max(self):
return np.max(self.vals())
def median(self):
return np.median(self.vals())
def skew(self):
return skew(self.vals())
def losses(self):
x = self.vals()
return [z for z in x if z < 0]
def gains(self):
x = self.vals()
return [z for z in x if z > 0]
def avg_loss(self):
return np.mean(self.losses())
def avg_gain(self):
return np.mean(self.gains())
def gaintolossratio(self):
return self.avg_gain() / -self.avg_loss()
def profitfactor(self):
return sum(self.gains()) / -sum(self.losses())
def hitrate(self):
no_gains = float(len(self.gains()))
no_losses = float(len(self.losses()))
return no_gains / (no_losses + no_gains)
def rolling_ann_std(self, window=40):
y = self.as_ts().rolling(
window, min_periods=4, center=True).std().to_frame()
return y * self._vol_scalar
def t_test(self):
return ttest_1samp(self.vals(), 0.0)
def t_stat(self):
return float(self.t_test()[0])
def p_value(self):
return float(self.t_test()[1])
def stats(self):
stats_list = [
"min", "max", "median", "mean", "std", "skew", "ann_mean",
"ann_std", "sharpe", "sortino", "avg_drawdown", "time_in_drawdown",
"calmar", "avg_return_to_drawdown", "avg_loss", "avg_gain",
"gaintolossratio", "profitfactor", "hitrate", "t_stat", "p_value"
]
build_stats = []
for stat_name in stats_list:
stat_method = getattr(self, stat_name)
ans = stat_method()
build_stats.append((stat_name, "{0:.4g}".format(ans)))
comment1 = ("You can also plot / print:", [
"rolling_ann_std", "drawdown", "curve", "percent", "cumulative"
])
return [build_stats, comment1]
def __repr__(self):
if self.weighted_flag:
weight_comment = "Weighted"
else:
weight_comment = "Unweighted"
return super().__repr__() + \
"\n %s account curve; use object.stats() to see methods" % weight_comment
class accountCurveSingleElement(accountCurveSingleElementOneFreq):
"""
A single account curve for one asset (instrument / trading rule variation,
...) and one part of it (gross, net, costs)
Inherits from data frame
We never init these directly but only as part of accountCurveSingle
"""
def __init__(self, returns_df, capital, weighted_flag=False):
"""
:param returns_df: series of returns
:type returns_df: Tx1 pd.Series
:param weighted_flag: Is this account curve of weighted returns?
:type weighted_flag: bool
"""
        # We often want to use the curve at several frequencies, so resample once up front
daily_returns = returns_df.resample("1B").sum()
weekly_returns = returns_df.resample("W").sum()
monthly_returns = returns_df.resample("MS").sum()
annual_returns = returns_df.resample("A").sum()
super().__init__(
daily_returns, capital, frequency="D", weighted_flag=weighted_flag)
setattr(self, "daily",
accountCurveSingleElementOneFreq(
daily_returns,
capital,
frequency="D",
weighted_flag=weighted_flag))
setattr(self, "weekly",
accountCurveSingleElementOneFreq(
weekly_returns,
capital,
frequency="W",
weighted_flag=weighted_flag))
setattr(self, "monthly",
accountCurveSingleElementOneFreq(
monthly_returns,
capital,
frequency="M",
weighted_flag=weighted_flag))
setattr(self, "annual",
accountCurveSingleElementOneFreq(
annual_returns,
capital,
frequency="Y",
weighted_flag=weighted_flag))
def __repr__(self):
return super().__repr__() + \
"\n Use object.freq.method() to access periods (freq=daily, weekly, monthly, annual) default: daily"
class accountCurveSingle(accountCurveSingleElement):
"""
A single account curve for one asset (instrument / trading rule variation, ...)
Inherits from data frame
On the surface we see the 'net' but there's also a gross and cost part included
"""
def __init__(self,
gross_returns,
net_returns,
costs,
capital,
weighted_flag=False):
"""
:param gross_returns: series of returns, no costs applied
:type gross_returns: Tx1 pd.Series
:param costs: series of costs (minus is a cost)
:type costs: Tx1 pd.Series
        :param net_returns: series of returns net of costs
:type net_returns: Tx1 pd.Series
:param weighted_flag: Is this account curve of weighted returns?
:type weighted_flag: bool
:param capital: capital
:type capital: Tx1 pd.Series of float
"""
super().__init__(net_returns, capital, weighted_flag=weighted_flag)
setattr(self, "net",
accountCurveSingleElement(
net_returns, capital, weighted_flag=weighted_flag))
setattr(self, "gross",
accountCurveSingleElement(
gross_returns, capital, weighted_flag=weighted_flag))
setattr(self, "costs",
accountCurveSingleElement(
costs, capital, weighted_flag=weighted_flag))
def __repr__(self):
return super().__repr__() + \
"\n Use object.curve_type.freq.method() (freq=net, gross, costs) default: net"
def to_ncg_frame(self):
"""
View net gross and costs together
:returns: Tx3 pd.DataFrame
"""
ans = pd.concat(
[self.net.as_ts(), self.gross.as_ts(), self.costs.as_ts()], axis=1)
ans.columns = ["net", "gross", "costs"]
return ans
class accountCurve(accountCurveSingle):
def __init__(self,
price=None,
cash_costs=None,
SR_cost=None,
capital=None,
ann_risk_target=None,
pre_calc_data=None,
weighted_flag=False,
weighting=None,
apply_weight_to_costs_only=False,
**kwargs):
"""
Create an account curve; from which many lovely statistics can be gathered
We create by passing **kwargs which will be used by the pandl function
        :param cash_costs: Costs in local currency units per instrument block
        :type cash_costs: 3-tuple of floats, see calc_costs()
        :param SR_cost: Cost in annualised Sharpe Ratio units (0.01 = 0.01 SR)
        :type SR_cost: float
        Note if both are included then cash_costs will be disregarded
:param capital: Capital at risk. Used for % returns, and calculating
daily risk for SR costs
:type capital: None, float, int, or Tx1
:param ann_risk_target: Annual risk target, as % of capital. Used to
calculate daily risk for SR costs
:type ann_risk_target: None or float
:param pre_calc_data: Used by the weighting function, to speed things
up and inherit pre-calculated stuff from an existing account curve
:type pre_calc_data: None or a big tuple
**kwargs passed to profit and loss calculation
(price, trades, marktomarket, positions,
delayfill, roundpositions,
get_daily_returns_volatility, forecast, use_fx,
value_of_price_point)
"""
if pre_calc_data:
(returns_data, base_capital, costs_base_ccy,
unweighted_instr_ccy_pandl) = pre_calc_data
(cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns,
use_fx, value_of_price_point) = returns_data
else:
"""
Capital is used for:
- going from forecast to position in profit and loss calculation
(fixed or a time series): daily_risk_capital
- calculating costs from SR costs (always a time series): ann_risk
- calculating percentage returns (maybe fixed or variable time
series): base_capital
"""
(base_capital, ann_risk, daily_risk_capital) = resolve_capital(
price, capital, ann_risk_target)
returns_data = pandl_with_data(
price, daily_risk_capital=daily_risk_capital, **kwargs)
(cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns,
use_fx, value_of_price_point) = returns_data
# always returns a time series
(costs_base_ccy, costs_instr_ccy) = calc_costs(
returns_data, cash_costs, SR_cost, ann_risk)
# keep track of this
unweighted_instr_ccy_pandl = dict(
gross=instr_ccy_returns,
costs=costs_instr_ccy,
net=instr_ccy_returns + costs_instr_ccy)
# Initially we have an unweighted version
self._calc_and_set_returns(
base_ccy_returns,
costs_base_ccy,
base_capital,
weighted_flag=weighted_flag,
weighting=weighting,
apply_weight_to_costs_only=apply_weight_to_costs_only)
# Save all kinds of useful statistics
setattr(self, "unweighted_instr_ccy_pandl", unweighted_instr_ccy_pandl)
setattr(self, "cum_trades", cum_trades)
setattr(self, "trades_to_use", trades_to_use)
setattr(self, "capital", base_capital)
setattr(self, "fx", use_fx)
setattr(self, "value_of_price_point", value_of_price_point)
def _calc_and_set_returns(self,
base_ccy_returns,
costs_base_ccy,
base_capital,
weighted_flag=False,
weighting=None,
apply_weight_to_costs_only=False):
"""
This hidden method is called when we setup the account curve, to
Also called again if we get a weighted version of this account curve
:param base_ccy_returns: Pre-cost returns in base currency terms (unweighted)
:type base_ccy_returns: Tx1 pd.Series
:param costs_base_ccy: Costs in base currency terms, aligned to base_ccy_returns (unweighted)
:type costs_base_ccy: Tx1 pd.Series
:param base_capital: base_capital in base currency terms
:type base_capital: Tx1 pd.Series (aligned to base_ccy_returns) or float
:param weighted_flag: Apply a weighting scheme, or not
:type weighted_flag: bool
:param weighting: Weighting scheme to apply to returns, MAY NOT BE aligned to base_ccy_returns
:type weighting: Tx1 pd.Series
:param apply_weight_to_costs_only: Apply weights only to costs, not gross returns
:type apply_weight_to_costs_only: bool
"""
if weighted_flag:
use_weighting = weighting.reindex(base_ccy_returns.index).ffill()
if not apply_weight_to_costs_only:
# only apply to gross returns if they aren't already weighted
base_ccy_returns = base_ccy_returns * use_weighting
# Always apply to costs
costs_base_ccy = costs_base_ccy * use_weighting
else:
use_weighting = None
net_base_returns = base_ccy_returns + \
costs_base_ccy # costs are negative returns
super().__init__(
base_ccy_returns,
net_base_returns,
costs_base_ccy,
base_capital,
weighted_flag=weighted_flag)
# save useful stats
# have to do this after super() call
setattr(self, "weighted_flag", weighted_flag)
setattr(self, "weighting", use_weighting)
def __repr__(self):
return super().__repr__(
) + "\n Use object.calc_data() to see calculation details"
def calc_data(self):
"""
Returns detailed calculation information
:returns: dictionary of float
"""
calc_items = [
"cum_trades", "trades_to_use", "unweighted_instr_ccy_pandl",
"capital", "weighting", "fx", "value_of_price_point"
]
calc_dict = dict([(calc_name, getattr(self, calc_name))
for calc_name in calc_items])
return calc_dict
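# A minimal usage sketch (not part of the original module); `price` and
# `forecast` are assumed to be aligned pd.Series.
def _account_curve_example(price, forecast):
    acc = accountCurve(price, forecast=forecast, SR_cost=0.01)
    print(acc.sharpe())                  # annualised Sharpe ratio, net of costs
    print(acc.gross.weekly.std())        # weekly standard deviation, gross
    return acc.percent().stats()         # summary stats on percentage returns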
def weighted(account_curve,
weighting,
apply_weight_to_costs_only=False,
allow_reweighting=False):
"""
Creates a copy of account curve with weights applied
:param account_curve: Curve to copy from
:type account_curve: accountCurve
:param weighting: Weighting scheme to apply to returns
:type weighting: Tx1 pd.Series
:param apply_weight_to_costs_only: Apply weights only to costs, not gross returns
:type apply_weight_to_costs_only: bool
    :param allow_reweighting: Allow an already weighted curve to be reweighted
:type allow_reweighting: bool
:returns: accountCurve
"""
if account_curve.weighted_flag:
if allow_reweighting:
pass
else:
raise Exception("You can't reweight weighted returns!")
# very clunky but I can't make copy, deepcopy or inheritance work for this
# use case...
base_capital = copy(account_curve.capital)
gross_returns = copy(account_curve.gross.as_ts())
costs_base_ccy = copy(account_curve.costs.as_ts())
unweighted_instr_ccy_pandl = copy(account_curve.unweighted_instr_ccy_pandl)
returns_data = (account_curve.cum_trades, account_curve.trades_to_use,
unweighted_instr_ccy_pandl["gross"], gross_returns,
account_curve.fx, account_curve.value_of_price_point)
pre_calc_data = (returns_data, base_capital, costs_base_ccy,
unweighted_instr_ccy_pandl)
# Create a cloned account curve with the pre calculated data
# recalculate the returns with weighting applied
new_account_curve = accountCurve(
pre_calc_data=pre_calc_data,
weighted_flag=True,
weighting=weighting,
apply_weight_to_costs_only=apply_weight_to_costs_only)
return new_account_curve
def calc_costs(returns_data, cash_costs, SR_cost, ann_risk):
"""
Calculate costs
:param returns_data: returns data
    :type returns_data: 6-tuple returned by the pandl_with_data function
:param cash_costs: Cost in local currency units per instrument block
:type cash_costs: 3 tuple of floats; value_total_per_block, value_of_pertrade_commission, percentage_cost
:param SR_cost: Cost in annualised Sharpe Ratio units (0.01 = 0.01 SR)
:type SR_cost: float
Set to None if not using. If both included use SR_cost
    :param ann_risk: Capital (capital * ann vol) at risk on annualised basis. Used for SR calculations
:type ann_risk: Tx1 pd.Series
:returns : Tx1 pd.Series of costs. Minus numbers are losses
"""
(cum_trades, trades_to_use, instr_ccy_returns, base_ccy_returns, use_fx,
value_of_price_point) = returns_data
if SR_cost is not None:
# use SR_cost
ann_cost = -SR_cost * ann_risk
costs_instr_ccy = ann_cost / BUSINESS_DAYS_IN_YEAR
elif cash_costs is not None:
# use cost per blocks
(value_total_per_block, value_of_pertrade_commission,
percentage_cost) = cash_costs
trades_in_blocks = trades_to_use.abs()
costs_blocks = -trades_in_blocks * value_total_per_block
value_of_trades = trades_in_blocks * value_of_price_point
costs_percentage = percentage_cost * value_of_trades
traded = trades_to_use[trades_to_use > 0]
if len(traded) == 0:
costs_pertrade = pd.Series([0.0] * len(cum_trades.index),
cum_trades.index)
else:
costs_pertrade = pd.Series(
[value_of_pertrade_commission] * len(traded.index),
traded.index)
costs_pertrade = costs_pertrade.reindex(trades_to_use.index)
        # everything is on the trades index, so we can do this:
costs_instr_ccy = costs_blocks + costs_percentage + costs_pertrade
else:
# set costs to zero
costs_instr_ccy = pd.Series([0.0] * len(use_fx), index=use_fx.index)
# fx is on master (price timestamp)
# costs_instr_ccy needs downsampling
costs_instr_ccy = costs_instr_ccy.cumsum().ffill().reindex(
use_fx.index).diff()
costs_base_ccy = costs_instr_ccy * use_fx.ffill()
costs_base_ccy[np.isnan(costs_base_ccy)] = 0.0
return (costs_base_ccy, costs_instr_ccy)
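# A minimal sketch (not part of the original module) of the cash_costs
# convention used above; the numbers are purely hypothetical.
def _cash_costs_example(price, forecast):
    # 5.0 currency units per block traded, 1.0 commission per trade,
    # plus 0.05% of the value traded
    return accountCurve(price, forecast=forecast,
                        cash_costs=(5.0, 1.0, 0.0005))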
def resolve_capital(ts_to_scale_to, capital=None, ann_risk_target=None):
"""
Resolve and setup capital
We need capital for % returns and possibly for SR stuff
Capital is used for:
- going from forecast to position in profit and loss calculation (fixed
or a time series): daily_risk_capital
- calculating costs from SR costs (always a time series): ann_risk
- calculating percentage returns (maybe fixed or variable time series):
capital
:param ts_to_scale_to: If capital is fixed, what time series to scale it to
:type capital: Tx1 pd.DataFrame
:param capital: Capital at risk. Used for % returns, and calculating daily
risk for SR costs
:type capital: None, int, float or Tx1 pd.DataFrame
:param ann_risk_target: Annual risk target, as % of capital 0.10 is 10%.
Used to calculate daily risk for SR costs
:type ann_risk_target: None or float
    :returns: 3-tuple (base_capital, ann_risk, daily_risk_capital); base_capital
        is a float or Tx1 pd.Series, ann_risk is always a pd.Series,
        daily_risk_capital is a float or pd.Series
"""
if capital is None:
base_capital = copy(DEFAULT_CAPITAL)
else:
base_capital = copy(capital)
if ann_risk_target is None:
ann_risk_target = DEFAULT_ANN_RISK_TARGET
# might be a float or a Series, depending on capital
daily_risk_capital = base_capital * ann_risk_target / ROOT_BDAYS_INYEAR
if isinstance(base_capital, float) or isinstance(base_capital, int):
ts_capital = pd.Series(
[base_capital] * len(ts_to_scale_to), index=ts_to_scale_to.index)
base_capital = float(base_capital)
else:
ts_capital = copy(base_capital)
# always a time series
ann_risk = ts_capital * ann_risk_target
return (base_capital, ann_risk, daily_risk_capital)
def acc_list_to_pd_frame(list_of_ac_curves, asset_columns):
"""
Returns a pandas data frame
:param list_of_ac_curves: Elements to include
:type list_of_ac_curves: list of any accountcurve like object
:param asset_columns: Names of each asset
:type asset_columns: list of str
:returns: TxN pd.DataFrame
"""
list_of_df = [acc.as_ts() for acc in list_of_ac_curves]
ans = pd.concat(list_of_df, axis=1, join="outer")
ans.columns = asset_columns
ans = ans.cumsum().ffill().diff()
return ans
def total_from_list(list_of_ac_curves, asset_columns, capital):
"""
    Add up the returns across a portfolio of account curves, returning the total return series and the capital to use
:param acc_curve_for_type_list: Elements to include in group
:type acc_curve_for_type_list: list of accountCurveSingleElement
:param asset_columns: Names of each asset
:type asset_columns: list of str
:param capital: Capital, if None will discover from list elements
:type capital: None, float, or pd.Series
:returns: 2 tuple of pd.Series
"""
pdframe = acc_list_to_pd_frame(list_of_ac_curves, asset_columns)
def _resolve_capital_for_total(capital, pdframe):
if isinstance(capital, float):
return pd.Series([capital] * len(pdframe), pdframe.index)
else:
return capital
def _all_float(list_of_ac_curves):
        curve_type_float = [isinstance(x.capital, float) for x in list_of_ac_curves]
return all(curve_type_float)
def _resolve_capital_list(pdframe, list_of_ac_curves, capital):
if capital is not None:
return capital
if _all_float(list_of_ac_curves):
capital = np.mean([x.capital for x in list_of_ac_curves])
            return capital
# at least some time series
capital = pd.concat(
[
_resolve_capital_for_total(x.capital, pdframe)
for x in list_of_ac_curves
],
axis=1)
# should all be the same, but just in case ...
capital = np.mean(capital, axis=1)
capital = capital.reindex(pdframe.index).ffill()
return capital
# all on daily freq so just add up
totalac = pdframe.sum(axis=1)
capital = _resolve_capital_list(pdframe, list_of_ac_curves, capital)
return (totalac, capital)
class accountCurveGroupForType(accountCurveSingleElement):
"""
an accountCurveGroup for one cost type (gross, net, costs)
"""
def __init__(self,
acc_curve_for_type_list,
asset_columns,
capital=None,
weighted_flag=False,
curve_type="net"):
"""
Create a group of account curves from a list and some column names
looks like accountCurveSingleElement; outward facing is the total p&L
so acc=accountCurveGroupForType()
acc.mean() ## for the total
Also you can access a instrument (gives an accountCurveSingleElement for an instrument):
acc[instrument_code].mean(), acc[instrument_code].mean()
acc.instrument_code.gross.daily.stats()
acc.to_frame() ## returns a data frame
If you want the original list back:
acc.to_list
Also: eg acc.get_stats("mean", freq="daily")
... Returns a dict of stats
:param acc_curve_for_type_list: Elements to include in group
:type acc_curve_for_type_list: list of accountCurveSingleElement
:param asset_columns: Names of each asset
:type asset_columns: list of str
:param curve_type: Net, gross or costs?
:type curve_type: str
:param weighted_flag: Is this account curve of weighted returns?
:type weighted_flag: bool
:param capital: Capital, if None will discover from list elements
:type capital: None, float, or pd.Series
"""
(acc_total, capital) = total_from_list(acc_curve_for_type_list,
asset_columns, capital)
super().__init__(
acc_total, weighted_flag=weighted_flag, capital=capital)
setattr(self, "to_list", acc_curve_for_type_list)
setattr(self, "asset_columns", asset_columns)
setattr(self, "curve_type", curve_type)
def __getitem__(self, colname):
"""
Overriding this method to access individual curves
:returns: accountCurveSingleElement
"""
try:
ans = self.to_list[self.asset_columns.index(colname)]
except ValueError:
raise Exception("%s not found in account curve" % colname)
return ans
def to_frame(self):
"""
Returns as a data frame, one column is an assets returns
:returns: TxN pd.DataFrame
"""
return acc_list_to_pd_frame(self.to_list, self.asset_columns)
def get_stats(self, stat_method, freq="daily", percent=True):
"""
Create a dictionary summarising statistics across a group of account curves
:param stat_method: Any method of accountCurveSingleElementOneFreq
:type stat_method: str
:param freq: frequency; daily, weekly, monthly or annual
:type freq: str
:param percent: get % returns
:type percent: bool
:returns: statsDict
"""
return statsDict(self, stat_method, freq, percent)
def time_weights(self):
"""
Returns a dict, values are weights according to how much data we have
:returns: dict of floats
"""
def _len_nonzero(ac_curve):
return_df = ac_curve.as_ts()
ans = len([x for x in return_df.values if not np.isnan(x)])
return ans
time_weights_dict = dict(
[(asset_name, _len_nonzero(ac_curve))
for (asset_name,
ac_curve) in zip(self.asset_columns, self.to_list)])
total_weight = sum(time_weights_dict.values())
time_weights_dict = dict([(asset_name, weight / total_weight)
for (asset_name,
weight) in time_weights_dict.items()])
return time_weights_dict
class statsDict(dict):
def __init__(self,
acgroup_for_type,
stat_method,
freq="daily",
percent=True):
"""
Create a dictionary summarising statistics across a group of account curves
:param acgroup_for_type: Account curve group to analyse
:type acgroup_for_type: accountCurveGroupForType
:param stat_method: Any method of accountCurveSingleElementOneFreq
:type stat_method: str
:param freq: frequency; daily, weekly, monthly or annual
:type freq: str
"""
column_names = acgroup_for_type.asset_columns
def _get_stat_from_acobject(acobject, stat_method, freq, percent):
freq_obj = getattr(acobject, freq)
if percent:
freq_obj = freq_obj.percent()
stat_method_function = getattr(freq_obj, stat_method)
return stat_method_function()
dict_values = [(col_name, _get_stat_from_acobject(
acgroup_for_type[col_name], stat_method, freq, percent))
for col_name in column_names]
super().__init__(dict_values)
# We need to augment this with time weightings, in case they are needed
setattr(self, "time_weightings", acgroup_for_type.time_weights())
def weightings(self, timeweighted=False):
"""
Returns a dict of weightings
Either equal weighting, or returns time_weightings
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: dict of floats
"""
if timeweighted:
return self.time_weightings
else:
return dict([(asset_name, 1.0 / len(self.values()))
for asset_name in self.keys()])
def mean(self, timeweighted=False):
"""
Return cross sectional mean of statistics
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
wts = self.weightings(timeweighted)
ans = sum([
asset_value * wts[asset_name]
for (asset_name, asset_value) in self.items()
])
return ans
def std(self, timeweighted=False):
"""
Return cross sectional standard deviation of statistics
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
wts = self.weightings(timeweighted)
avg = self.mean(timeweighted)
ans = sum([
wts[asset_name] * (asset_value - avg)**2
for (asset_name, asset_value) in self.items()
])**.5
return ans
def tstat(self, timeweighted=False):
"""
Determine if cross section of statistics is significantly different from zero
High t statistic = yes
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
t_mean = self.mean(timeweighted)
t_std = self.std(timeweighted)
if t_std == 0.0:
return np.nan
return t_mean / t_std
def pvalue(self, timeweighted=False):
"""
Determine if cross section of statistics is significantly different from zero
Low p value = yes
:param timeweighted: Time weight statistics or use simple average
:type: timeweighted: bool
:returns: float
"""
tstat = self.tstat(timeweighted)
n = len(self.values())
if np.isnan(tstat) or n < 2:
return np.nan
pvalue = stats.t.sf(np.abs(tstat), n - 1) # one sided t statistic
return pvalue
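# A minimal usage sketch (not part of the original module); `acc` is assumed
# to be an accountCurveGroup built elsewhere.
def _stats_dict_example(acc):
    sharpes = acc.get_stats("sharpe", curve_type="net", freq="weekly")
    # cross sectional summary of the per-asset Sharpe ratios
    return sharpes.mean(), sharpes.std(), sharpes.pvalue(timeweighted=True)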
class accountCurveGroup(accountCurveSingleElement):
def __init__(self,
acc_curve_list,
asset_columns,
capital=None,
weighted_flag=None):
"""
Create a group of account curves from a list and some column names
looks like accountCurve, so outward facing is the total p&L
No weighting is done, so returns of the total will be simple addition
so acc=accountCurveGroup()
acc.mean()
acc.net.mean()
acc.net.daily.mean()
Also you can access a instrument:
acc[instrument_code].mean(), acc[instrument_code].net.mean()
acc.instrument_code.gross.daily.stats()
acc.to_frame() ## returns a data frame
acc.to_frame("gross") ## returns a data frame
acc.costs.to_frame() ## returns a data frame
If you want the original list back:
acc.to_list
Also: eg acc.get_stats("mean", curve_type="net", freq="daily")
acc.net.get_stats("sharpe", freq="weekly")
... Returns a list of stats
:param acc_curve_list: Curves to group together
:type acc_curve_list: list of accountCurve() objects
:param asset_columns: Names of each asset (same order as acc_curve_list)
:type asset_columns: list of str
:param capital: Capital, if None will discover from list elements
:type capital: None, float, or pd.Series
:param weighted_flag: Is this a weighted_flag account curve? If None then inherits from list.
:type weighted_flag: None or bool
"""
if weighted_flag is None:
weighted_flag = [x.weighted_flag for x in acc_curve_list]
if any(weighted_flag):
if not (all(weighted_flag)):
raise Exception(
"Can't mix weighted_flag and unweighted account curves")
else:
weighted_flag = True
else:
weighted_flag = False
net_list = [getattr(x, "net") for x in acc_curve_list]
gross_list = [getattr(x, "gross") for x in acc_curve_list]
costs_list = [getattr(x, "costs") for x in acc_curve_list]
acc_list_net = accountCurveGroupForType(
net_list,
asset_columns=asset_columns,
capital=capital,
weighted_flag=weighted_flag,
curve_type="net")
acc_list_gross = accountCurveGroupForType(
gross_list,
asset_columns=asset_columns,
capital=capital,
weighted_flag=weighted_flag,
curve_type="gross")
acc_list_costs = accountCurveGroupForType(
costs_list,
asset_columns=asset_columns,
capital=capital,
weighted_flag=weighted_flag,
curve_type="costs")
(acc_total, capital) = total_from_list(net_list, asset_columns,
capital)
super().__init__(
acc_total, weighted_flag=weighted_flag, capital=capital)
setattr(self, "net", acc_list_net)
setattr(self, "gross", acc_list_gross)
setattr(self, "costs", acc_list_costs)
setattr(self, "to_list", acc_curve_list)
setattr(self, "asset_columns", asset_columns)
def __repr__(self):
return super().__repr__() + "\n Multiple curves. Use object.curve_type (curve_type= net, gross, costs)" + \
"\n Useful methods: to_list, asset_columns(), get_stats(), to_frame()"
def __getitem__(self, colname):
"""
Overriding this method to access individual curves
Returns an object of type accountCurve
"""
try:
ans = self.to_list[self.asset_columns.index(colname)]
except ValueError:
raise Exception("%s not found in account curve" % colname)
return ans
def get_stats(self, stat_method, curve_type="net", freq="daily"):
"""
Returns a dict of stats, one per asset
:param stat_method: Any method of accountCurveSingleElementOneFreq
:type stat_method: str
:param curve_type: gross, net or costs
:type curve_type: str
:param freq: frequency; daily, weekly, monthly or annual
:type freq: str
:returns: statsDict, dict like object
"""
subobject = getattr(self, curve_type)
return subobject.get_stats(stat_method, freq=freq)
def to_frame(self, curve_type="net"):
"""
Returns individual return curves as a data frame
:param curve_type: gross, net or costs
:type curve_type: str
:returns: pd.Dataframe TxN
"""
actype = getattr(self, curve_type)
return actype.to_frame()
def stack(self):
"""
Collapse instrument level data into a list of returns in a stack_returns object (pd.TimeSeries)
We can bootstrap this or perform other statistics
        :returns: returnsStack
"""
        return returnsStack(self.to_list)
def to_ncg_frame(self):
"""
Returns total account curves for net, gross and costs in a dataframe
:returns: Tx3 pd.Dataframe
"""
ans = pd.concat(
[self.net.as_ts(), self.gross.as_ts(), self.costs.as_ts()], axis=1)
ans.columns = ["net", "gross", "costs"]
return ans
class returnsStack(accountCurveSingle):
"""
Create a stack of returns which we can bootstrap
"""
def __init__(self, returns_list):
"""
Create a stack of returns which we can bootstrap
:param returns_list: returns to be bootstrapped
:type returns_list: List of accountCurve() objects
"""
# Collapse indices to a single one
bs_index_to_use = [list(returns.index) for returns in returns_list]
bs_index_to_use = sum(bs_index_to_use, [])
bs_index_to_use = sorted(set(bs_index_to_use))
# Collapse return lists
curve_type_list = ["gross", "net", "costs"]
def _collapse_one_curve_type(returns_list, curve_type):
collapsed_values = sum([
                list(getattr(returns, curve_type).values)
for returns in returns_list
], [])
return collapsed_values
collapsed_curves_values = dict([(curve_type, _collapse_one_curve_type(
returns_list, curve_type)) for curve_type in curve_type_list])
# We set this to an arbitrary index so we can make an account curve
gross_returns_df = pd.Series(
collapsed_curves_values["gross"],
pd.date_range(
start=bs_index_to_use[0],
periods=len(collapsed_curves_values["gross"]),
freq="B"))
net_returns_df = pd.Series(
collapsed_curves_values["net"],
pd.date_range(
start=bs_index_to_use[0],
periods=len(collapsed_curves_values["net"]),
freq="B"))
costs_returns_df = pd.Series(
collapsed_curves_values["costs"],
pd.date_range(
start=bs_index_to_use[0],
periods=len(collapsed_curves_values["costs"]),
freq="B"))
super().__init__(gross_returns_df, net_returns_df, costs_returns_df)
# We need to store this for bootstrapping purposes
setattr(self, "_bs_index_to_use", bs_index_to_use)
def bootstrap(self, no_runs=50, length=None):
"""
Create an accountCurveGroup object containing no_runs, each same length as the
original portfolio (unless length is set)
:param no_runs: Number of runs to do
:type no_runs: int
:param length: Length of each run
:type length: int
:returns: accountCurveGroup, one element for each of no_runs
"""
values_to_sample_from = dict(
gross=list(getattr(self, "gross").iloc[:, 0].values),
net=list(getattr(self, "net").iloc[:, 0].values),
costs=list(getattr(self, "costs").iloc[:, 0].values))
size_of_bucket = len(self.index)
if length is None:
index_to_use = self._bs_index_to_use
length = len(index_to_use)
else:
index_to_use = pd.date_range(
start=self._bs_index_to_use[0], periods=length, freq="B")
bs_list = []
for notUsed in range(no_runs):
sample = [
int(round(random.uniform(0, size_of_bucket - 1)))
for notUsed2 in range(length)
]
# each element of accountCurveGroup is an accountCurveSingle
bs_list.append(
accountCurveSingle(
pd.Series(
[
values_to_sample_from["gross"][xidx]
for xidx in sample
],
index=index_to_use),
pd.Series(
[
values_to_sample_from["net"][xidx]
for xidx in sample
],
index=index_to_use),
pd.Series(
[
values_to_sample_from["costs"][xidx]
for xidx in sample
],
index=index_to_use)))
asset_columns = ["b%d" % idx for idx in range(no_runs)]
return accountCurveGroup(bs_list, asset_columns)
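# A minimal usage sketch (not part of the original module); `acc` is assumed
# to be an accountCurveGroup.
def _bootstrap_example(acc):
    stack = acc.stack()                   # pool all instrument returns
    boots = stack.bootstrap(no_runs=100)  # 100 resampled account curves
    return boots.get_stats("sharpe").mean()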
def _DEPRECATED_get_trades_from_positions(
price, positions, delayfill, roundpositions,
get_daily_returns_volatility, forecast, fx, value_of_price_point,
daily_capital):
"""
Work out trades implied by a series of positions
If delayfill is True, assume we get filled at the next price after the
trade
If roundpositions is True when working out trades from positions, then
round; otherwise assume we trade fractional lots
If positions are not provided, work out position using forecast and
volatility (this will be for an arbitrary daily risk target)
If volatility is not provided, work out from price
Args:
price (Tx1 pd.DataFrame): price series
positions (Tx1 pd.DataFrame or None): (series of positions)
        delayfill (bool): if True, assume trades are filled at the next price
            after the trade is generated
roundpositions (bool): If calculating trades, should we round positions
first?
get_daily_returns_volatility (Tx1 pd.DataFrame or None): series of
volatility estimates, used for calculation positions
forecast (Tx1 pd.DataFrame or None): series of forecasts, needed to
work out positions
fx (Tx1 pd.DataFrame or None): series of fx rates from instrument
currency to base currency, to work out p&l in base currency
        value_of_price_point (float): value of one point of price movement
Returns:
Tx1 pd dataframe of trades
"""
if roundpositions:
# round to whole positions
round_positions = positions.round()
else:
round_positions = copy(positions)
# deal with edge cases where we don't have a zero position initially, or
# leading nans
first_row = pd.Series([0.0], index=[round_positions.index[0] - BDay(1)])
round_positions = pd.concat([first_row, round_positions], axis=0)
round_positions = round_positions.ffill()
trades = round_positions.diff()
if delayfill:
# fill will happen one day after we generated trade (being
# conservative)
trades.index = trades.index + pd.DateOffset(1)
# put prices on to correct timestamp
(trades, align_price) = trades.align(price, join="outer")
ans = pd.concat([trades, align_price], axis=1)
ans.columns = ['trades', 'fill_price']
# fill will happen at next valid price if it happens to be missing
    ans['fill_price'] = ans['fill_price'].bfill()
# remove zeros (turns into nans)
ans = ans[ans.trades != 0.0]
ans = ans[np.isfinite(ans.trades)]
return ans
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
dmnfarrell/epitopepredict | epitopepredict/plotting.py | 1 | 37189 | #!/usr/bin/env python
"""
epitopepredict plotting
Created February 2016
Copyright (C) Damien Farrell
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import absolute_import, print_function
import sys, os, math
from collections import OrderedDict
try:
import matplotlib
    matplotlib.use('agg')
import pylab as plt
except:
pass
import numpy as np
import pandas as pd
from . import base
colormaps={'tepitope':'Greens','netmhciipan':'Oranges','iedbmhc2':'Pinks',
'iedbmhc1':'Blues'}
defaultcolors = {'tepitope':'green','netmhciipan':'orange','basicmhc1':'yellow',
'iedbmhc1':'blue','iedbmhc2':'pink'}
def plot_heatmap(df, ax=None, figsize=(6,6), **kwargs):
"""Plot a generic heatmap """
if ax==None:
fig=plt.figure(figsize=figsize)
ax=fig.add_subplot(111)
else:
fig = ax.get_figure()
df = df._get_numeric_data()
hm = ax.pcolor(df, **kwargs)
#fig.colorbar(hm, ax=ax)
ax.set_xticks(np.arange(0.5, len(df.columns)))
ax.set_yticks(np.arange(0.5, len(df.index)))
ax.set_xticklabels(df.columns, minor=False, fontsize=14,rotation=90)
ax.set_yticklabels(df.index, minor=False, fontsize=14)
ax.set_ylim(0, len(df.index))
hm.set_clim(0,1)
#ax.grid(True)
plt.tight_layout()
return ax
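# Usage sketch (illustrative only, not from the original module): render
# plot_heatmap on a small random DataFrame. Wrapped in a helper so importing
# this module stays side-effect free; with the 'agg' backend the figure is
# saved rather than shown.
def _example_plot_heatmap():
    demo = pd.DataFrame(np.random.rand(5, 4),
                        index=['a', 'b', 'c', 'd', 'e'],
                        columns=['w', 'x', 'y', 'z'])
    ax = plot_heatmap(demo, figsize=(4, 4), cmap='Greens')
    ax.get_figure().savefig('heatmap_example.png')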
def get_seq_from_binders(P, name=None):
    """Get the sequence length implied by binder data. Probably better to store the sequences in the object?"""
if P.data is None or len(P.data)==0:
return
if name is not None:
data=P.data[P.data.name==name]
else:
data=P.data
l = len(data.iloc[0].peptide)
seqlen = data.pos.max()+l
return seqlen
def get_bokeh_colors(palette='Set1'):
from bokeh.palettes import brewer
n = len(base.predictors)
pal = brewer[palette][n]
i=0
clrs={}
for m in base.predictors:
clrs[m] = pal[i]
i+=1
return clrs
def get_sequence_colors(seq):
"""Get colors for a sequence"""
from bokeh.palettes import brewer, viridis, plasma
from Bio.PDB.Polypeptide import aa1
pal = plasma(20)
pal.append('white')
aa1 = list(aa1)
aa1.append('-')
pcolors = {i:j for i,j in zip(aa1,pal)}
text = list(seq)
clrs = {'A':'red','T':'green','G':'orange','C':'blue','-':'white'}
try:
colors = [clrs[i] for i in text]
except:
colors = [pcolors[i] for i in text]
return colors
def bokeh_test(n=20,height=400):
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.models.glyphs import Text, Rect, Circle
data = {'x_values': np.random.random(n),
'y_values': np.random.random(n)}
source = ColumnDataSource(data=data)
tools = "pan,wheel_zoom,hover,tap,reset,save"
p = figure(plot_height=height,tools=tools)
c = Circle(x='x_values', y='y_values', radius=.02, line_color='black', fill_color='blue', fill_alpha=.6)
#p.circle(x='x_values', y='y_values', radius=.02, line_color='black', fill_color='blue', fill_alpha=.6, source=source)
p.add_glyph(source, c)
return p
def bokeh_summary_plot(df, savepath=None):
"""Summary plot"""
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.models import ColumnDataSource,Range1d,HoverTool,TapTool,CustomJS,OpenURL
TOOLS = "pan,wheel_zoom,hover,tap,reset,save"
colors = get_bokeh_colors()
df=df.rename(columns={'level_0':'predictor'})
df['color'] = [colors[x] for x in df['predictor']]
p = figure(title = "Summary", tools=TOOLS, width=500, height=500)
p.xaxis.axis_label = 'binder_density'
p.yaxis.axis_label = 'binders'
#make metric for point sizes
#df['point_size'] = df.binder_density
source = ColumnDataSource(data=df)
p.circle(x='binder_density', y='binders', line_color='black', fill_color='color',
fill_alpha=0.4, size=10, source=source, legend_group='predictor')
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("name", "@name"),
("length", "@length"),
("binders", "@binders"),
("binder_density", "@binder_density"),
("top_peptide", "@top_peptide"),
("max_score", "@max_score"),
])
p.toolbar.logo = None
if savepath != None:
url = "http://localhost:8000/sequence?savepath=%s&name=@name" %savepath
taptool = p.select(type=TapTool)
taptool.callback = OpenURL(url=url)
    callback = CustomJS(args=dict(source=source), code="""
        var data = source.data;
        var f = cb_obj.value;
        data['x'] = f;
        source.change.emit();
    """)
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Select
menu = [(i,i) for i in df.columns]
select = Select(title='X', value='A', options=list(df.columns), width=8)
select.js_on_change('value', callback)
#layout = column(p, select, sizing_mode='scale_width')
return p
def bokeh_plot_tracks(preds, title='', n=2, name=None, cutoff=.95, cutoff_method='default',
width=None, height=None, x_range=None, tools=True, palette='Set1',
seqdepot=None, exp=None):
"""
Plot binding predictions as parallel tracks of blocks for each allele.
This uses Bokeh.
Args:
title: plot title
n: min alleles to display
name: name of protein to show if more than one in data
Returns: a bokeh figure for embedding or displaying in a notebook
"""
from collections import OrderedDict
from bokeh.models import Range1d, HoverTool, FactorRange, ColumnDataSource, Text, Rect
from bokeh.plotting import figure
if tools == True:
tools="xpan, xwheel_zoom, hover, reset, save"
else:
tools=''
if width == None:
width=1000
sizing_mode='scale_width'
else:
sizing_mode='fixed'
alls=1
seqlen=0
for P in preds:
if P.data is None or len(P.data)==0:
continue
seqlen = get_seq_from_binders(P, name=name)
#print (seqlen)
alls += len(P.data.groupby('allele'))
if seqlen == 0:
return
if height==None:
height = 140+10*alls
if x_range == None:
x_range = Range1d(0, seqlen, bounds='auto')
yrange = Range1d(start=0, end=alls+3)
plot = figure(title=title, plot_width=width, sizing_mode=sizing_mode,
plot_height=height, y_range=yrange, x_range=x_range,
y_axis_label='allele',
tools=tools)
h=3
if exp is not None:
plotExp(plot, exp)
colors = get_bokeh_colors(palette)
x=[];y=[];allele=[];widths=[];clrs=[];peptide=[]
predictor=[];position=[];score=[];leg=[];seqs=[];text=[]
l=80
i=0
for pred in preds:
m = pred.name
df = pred.data
seq = base.sequence_from_peptides(df)
if df is None or len(df) == 0:
print('no data to plot for %s' %m)
continue
if name != None:
df = df[df.name==name]
sckey = pred.scorekey
binders = pred.get_binders(name=name, cutoff=cutoff, cutoff_method=cutoff_method)
#print (cutoff, n)
pb = pred.promiscuous_binders(n=n, name=name, cutoff=cutoff, cutoff_method=cutoff_method)
if len(pb) == 0:
continue
l = base.get_length(pb)
grps = df.groupby('allele')
alleles = grps.groups.keys()
#seqs.extend([seq for i in alleles])
#t = [i for s in list(seqs) for i in s]
#text.extend(t)
if len(pb)==0:
continue
c = colors[m]
leg.append(m)
seqlen = df.pos.max()+l
for a,g in grps:
b = binders[binders.allele==a]
b = b[b.pos.isin(pb.pos)] #only promiscuous
b.sort_values('pos',inplace=True)
scores = b[sckey].values
score.extend(scores)
pos = b['pos'].values
position.extend(pos)
x.extend(pos+(l/2.0)) #offset as coords are rect centers
widths.extend([l for i in scores])
clrs.extend([c for i in scores])
y.extend([h+0.5 for i in scores])
alls = [a for i in scores]
allele.extend(alls)
peptide.extend(list(b.peptide.values))
predictor.extend([m for i in scores])
h+=1
i+=1
data = dict(x=x,y=y,allele=allele,peptide=peptide,width=widths,color=clrs,
predictor=predictor,position=position,score=score)
source = ColumnDataSource(data=data)
plot.rect(x='x', y='y', source=source, width='width', height=0.8,
legend_group='predictor',
color='color',line_color='gray',alpha=0.7)
#glyph = Text(x="x", y="y", text="text", text_align='center', text_color="black",
# text_font="monospace", text_font_size="10pt")
#plot.add_glyph(source, glyph)
hover = plot.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
("allele", "@allele"),
("position", "@position"),
("peptide", "@peptide"),
("score", "@score"),
("predictor", "@predictor"),
])
plot.xaxis.major_label_text_font_size = "9pt"
plot.xaxis.major_label_text_font_style = "bold"
plot.ygrid.grid_line_color = None
plot.xgrid.minor_grid_line_alpha = 0.1
plot.xgrid.minor_grid_line_color = 'gray'
#plot.xgrid.minor_grid_line_dash = [6, 4]
plot.yaxis.major_label_text_font_size = '0pt'
#plot.xaxis.major_label_orientation = np.pi/4
plot.min_border = 10
plot.background_fill_color = "#fafaf4"
plot.background_fill_alpha = 0.5
plot.legend.orientation = "horizontal"
plot.legend.location = "bottom_right"
#plot.legend.label_text_font_size = 12
plot.toolbar.logo = None
plot.toolbar_location = "right"
return plot
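# Usage sketch (assumes `preds` is a list of predictor objects with loaded
# data, as used throughout this module): embed the track plot in a notebook.
def _example_show_tracks(preds, protein_name=None):
    from bokeh.io import show
    p = bokeh_plot_tracks(preds, title='predicted binders', n=2,
                          cutoff=.95, name=protein_name)
    if p is not None:  # the function returns None when there is no data
        show(p)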
def bokeh_plot_sequence(preds, name=None, n=2, cutoff=.95, cutoff_method='default',
width=1000, color_sequence=False, title=''):
"""Plot sequence view of binders """
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, LinearAxis, Grid, Range1d, Text, Rect, CustomJS, Slider, RangeSlider, FactorRange
from bokeh.layouts import gridplot, column
colors = []
seqs = []
text = []
alleles = []
ylabels = []
pcolors = get_bokeh_colors()
for P in preds:
print (P.name)
df = P.data
#get sequence from results dataframe
seq = base.sequence_from_peptides(df)
l = base.get_length(df)
b = P.get_binders(name=name, cutoff=cutoff, cutoff_method=cutoff_method)
pb = P.promiscuous_binders(name=name, cutoff=cutoff, n=n, cutoff_method=cutoff_method)
b = b[b.pos.isin(pb.pos)] #only promiscuous
grps = b.groupby('allele')
al = list(grps.groups)
alleles.extend(al)
ylabels.extend([P.name+' '+i for i in al])
currseq=[seq for i in al]
seqs.extend(currseq)
t = [i for s in currseq for i in s]
text.extend(t)
print (len(seqs),len(text))
for a in al:
pos=[]
f = list(b[b.allele==a].pos)
for i in f:
pos.extend(np.arange(i,i+l))
if color_sequence is True:
                c = get_sequence_colors(seq)
else:
c = ['white' for i in seq]
for i in pos:
c[i] = pcolors[P.name]
colors.extend(c)
#put into columndatasource for plotting
N = len(seqs[0])
S = len(alleles)
x = np.arange(1, N+1)
y = np.arange(0,S,1)
xx, yy = np.meshgrid(x, y)
gx = xx.ravel()
gy = yy.flatten()
recty = gy+.5
source = ColumnDataSource(dict(x=gx, y=gy, recty=recty, text=text, colors=colors))
plot_height = len(seqs)*15+60
x_range = Range1d(0,N+1, bounds='auto')
L=100
if len(seq)<100:
L=len(seq)
view_range = (0,L)
viewlen = view_range[1]-view_range[0]
fontsize="8.5pt"
tools="xpan, reset, save"
p = figure(title=title, plot_width=width, plot_height=plot_height, x_range=view_range, y_range=ylabels, tools=tools,
min_border=0, sizing_mode='stretch_both', lod_factor=10, lod_threshold=1000)
seqtext = Text(x="x", y="y", text="text", text_align='center',text_color="black",
text_font="monospace", text_font_size=fontsize)
rects = Rect(x="x", y="recty", width=1, height=1, fill_color="colors", line_color='gray', fill_alpha=0.6)
p.add_glyph(source, rects)
p.add_glyph(source, seqtext)
p.xaxis.major_label_text_font_style = "bold"
p.grid.visible = False
p.toolbar.logo = None
#preview view (no text)
p1 = figure(title=None, plot_width=width, plot_height=S*3+5, x_range=x_range, y_range=(0,S), tools=[],
min_border=0, sizing_mode='stretch_width', lod_factor=10, lod_threshold=10)
rects = Rect(x="x", y="recty", width=1, height=1, fill_color="colors", line_color=None, fill_alpha=0.6)
previewrect = Rect(x=viewlen/2,y=S/2, width=viewlen, height=S*.99, line_color='darkblue', fill_color=None)
p1.add_glyph(source, rects)
p1.add_glyph(source, previewrect)
p1.yaxis.visible = False
p1.grid.visible = False
p1.toolbar_location = None
#callback for slider move
jscode="""
var start = cb_obj.value[0];
var end = cb_obj.value[1];
x_range.setv({"start": start, "end": end})
rect.width = end-start;
rect.x = start+rect.width/2;
var fac = rect.width/width;
console.log(fac);
if (fac>=.14) { fontsize = 0;}
else { fontsize = 8.5; }
text.text_font_size=fontsize+"pt";
"""
callback = CustomJS(
args=dict(x_range=p.x_range,rect=previewrect,text=seqtext,width=p.plot_width), code=jscode)
slider = RangeSlider (start=0, end=N, value=(0,L), step=10)#, callback_policy="throttle")
slider.js_on_change('value_throttled', callback)
#callback for plot drag
jscode="""
start = parseInt(range.start);
end = parseInt(range.end);
slider.value[0] = start;
rect.width = end-start;
rect.x = start+rect.width/2;
"""
#p.x_range.callback = CustomJS(args=dict(slider=slider, range=p.x_range, rect=previewrect),
# code=jscode)
p = gridplot([[p1],[p],[slider]], toolbar_location="below", merge_tools=False)
return p
def bokeh_plot_grid(pred, name=None, width=None, palette='Blues', **kwargs):
"""Plot heatmap of binding results for a predictor."""
from bokeh.plotting import figure
from bokeh.models import (Range1d,HoverTool,FactorRange,ColumnDataSource,
LinearColorMapper,LogColorMapper,callbacks,DataRange)
from bokeh.palettes import all_palettes
TOOLS = "xpan, xwheel_zoom, hover, reset, save"
if width == None:
sizing_mode = 'scale_width'
width=900
else:
sizing_mode = 'fixed'
P=pred
df = P.data
if df is None:
return
cols = ['allele','pos','peptide',P.scorekey]
#d = df[cols].copy()
b = P.get_binders(name=name,**kwargs)
d = P.data.copy()
#mark binders
mask = d.index.isin(b.index)
d['binder'] = mask
l = base.get_length(df)
grps = df.groupby('allele')
alleles = grps.groups
seqlen = get_seq_from_binders(P, name)
    seq = base.sequence_from_peptides(df)
height = 300
alls = len(alleles)
x_range = Range1d(0,seqlen-l+1, bounds='auto')
#x_range = list(seq)
y_range = df.allele.unique()
val = P.scorekey
cut = P.cutoff
if P.name not in ['tepitope']:
d['score1'] = d[val].apply( lambda x: 1-math.log(x, 50000))
val='score1'
    d.loc[d.binder == False, val] = d[val].min()
source = ColumnDataSource(d)
colors = all_palettes[palette][7]
mapper = LinearColorMapper(palette=colors, low=d[val].max(), high=d[val].min())
p = figure(title=P.name+' '+name,
x_range=x_range, y_range=y_range,
x_axis_location="above", plot_width=width, plot_height=height,
tools=TOOLS, toolbar_location='below', sizing_mode=sizing_mode)
p.rect(x="pos", y="allele", width=1, height=1,
source=source,
fill_color={'field': val,'transform':mapper},
line_color='gray', line_width=.1)
p.select_one(HoverTool).tooltips = [
('allele', '@allele'),
(P.scorekey, '@%s{1.11}' %P.scorekey),
('pos', '@pos'),
('peptide', '@peptide')
]
p.toolbar.logo = None
p.yaxis.major_label_text_font_size = "10pt"
p.yaxis.major_label_text_font_style = "bold"
return p
def bokeh_plot_bar(preds, name=None, allele=None, title='', width=None, height=100,
palette='Set1', tools=True, x_range=None):
"""Plot bars combining one or more prediction results for a set of
peptides in a protein/sequence"""
from bokeh.models import Range1d,HoverTool,ColumnDataSource
from bokeh.plotting import figure
from bokeh.transform import dodge
height = 180
seqlen = 0
if width == None:
width=700
sizing_mode='scale_width'
for P in preds:
if P.data is None or len(P.data)==0:
continue
seqlen = get_seq_from_binders(P)
if x_range == None:
x_range = Range1d(0,seqlen)
y_range = Range1d(start=0, end=1)
if tools == True:
tools="xpan, xwheel_zoom, reset, hover"
else:
tools=None
plot = figure(title=title,plot_width=width,sizing_mode=sizing_mode,
plot_height=height, y_range=y_range, x_range=x_range,
y_axis_label='rank',
tools=tools)
colors = get_bokeh_colors(palette)
data = {}
mlist = []
for pred in preds:
m = pred.name
df = pred.data
if df is None or len(df) == 0:
continue
if name != None:
df = df[df.name==name]
grps = df.groupby('allele')
alleles = grps.groups.keys()
if allele not in alleles:
continue
#print (m, alleles, allele)
df = df[df.allele==allele]
df = df.sort_values('pos').set_index('pos')
key = pred.scorekey
X = df[key]
X = (X+abs(X.min())) / (X.max() - X.min())
data[m] = X.values
data['pos'] = list(X.index)
#data['peptide'] = df.peptide.values
mlist.append(m)
source = ColumnDataSource(data)
w = round(1.0/len(mlist),1)-.1
i=-w/2
for m in mlist:
#m = pred.name
c = colors[m]
        plot.vbar(x=dodge('pos', i, range=plot.x_range), top=m, width=w, source=source,
                  color=c, legend_label=m, alpha=.8)
i+=w
hover = plot.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
#("allele", "@allele"),
("pos", "@pos") ])
plot.min_border = 10
plot.background_fill_color = "beige"
plot.background_fill_alpha = 0.5
plot.toolbar.logo = None
plot.toolbar_location = "right"
plot.legend.location = "top_right"
plot.legend.orientation = "horizontal"
return plot
def bokeh_vbar(x, height=200, title='', color='navy'):
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
source = ColumnDataSource(data={'chr':list(x.index),'x':range(len(x)),'y':x.values})
plot = figure(title=title, x_range = list(x.index), plot_height=height, tools='save,reset')
plot.vbar(x='chr',top='y', width=.8, bottom=0,source=source, color=color)
plot.ygrid.grid_line_color = None
plot.xgrid.grid_line_color = None
plot.xaxis.major_label_orientation = np.pi/4
return plot
def bokeh_pie_chart(df, title='', radius=.5, width=400, height=400, palette='Spectral'):
"""Bokeh pie chart"""
from bokeh.plotting import figure
from bokeh.models import HoverTool,ColumnDataSource
from math import pi
s = df.cumsum()/df.sum()
cats = s.index
p=[0]+list(s)
#print (p)
starts = [1/2*pi-(i*2*pi) for i in p[:-1]]
ends = [1/2*pi-(i*2*pi) for i in p[1:]]
from bokeh.palettes import brewer
    n = len(s)
    # brewer palettes are keyed by size 3-11; take one color per category
    pal = brewer[palette][max(n, 3)][:n]
source = ColumnDataSource(
dict(x=[0 for x in s], y=[0 for x in s],
radius = [radius for x in s],
category= cats,
starts=starts,
ends=ends,
colors=pal,
counts = df
))
plot = figure(title=title, plot_width=width, plot_height=height, tools='save,reset')
plot.wedge(x='x', y='y', radius='radius', direction="clock", fill_color='colors', color='black',
               start_angle='starts', end_angle='ends', legend_field='category', source=source)
plot.axis.visible = False
plot.ygrid.visible = False
plot.xgrid.visible = False
#hover = plot.select(dict(type=HoverTool))
#hover.tooltips = [
# ('category', '@category'),
# ('percents','@counts')
#]
return plot
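# Usage sketch: a pie chart from a small pandas Series of category counts.
# Category names here are placeholders.
def _example_pie_chart():
    from bokeh.io import show
    counts = pd.Series([12, 6, 2], index=['snow', 'water', 'melt'])
    show(bokeh_pie_chart(counts, title='surface types'))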
def plot_tracks(preds, name, n=1, cutoff=.95, cutoff_method='default', regions=None,
legend=False, colormap='Paired', figsize=None, ax=None, **kwargs):
"""
Plot binders as bars per allele using matplotlib.
Args:
preds: list of one or more predictors
name: name of protein to plot
n: number of alleles binder should be found in to be displayed
cutoff: percentile cutoff to determine binders to show
"""
import matplotlib as mpl
import pylab as plt
from matplotlib.patches import Rectangle
if ax == None:
if figsize==None:
h = sum([len(p.data.groupby('allele')) for p in preds])
w = 10
h = round(h*.1+2)
figsize = (w,h)
#plt.clf()
fig = plt.figure(figsize=figsize,facecolor='white')
ax = fig.add_subplot(111)
p = len(preds)
cmap = mpl.cm.get_cmap(colormap)
colors = { preds[i].name : cmap(float(i)/p) for i in range(p) }
alleles = []
leg = []
y=0
handles = []
for pred in preds:
m = pred.name
df = pred.data
if df is None or len(df) == 0:
print('no data to plot for %s' %m)
continue
if name != None:
if name not in df.name.unique():
print ('no such sequence %s' %name)
continue
df = df[df.name==name]
sckey = pred.scorekey
binders = pred.get_binders(name=name, cutoff=cutoff,
cutoff_method=cutoff_method)
#print (binders)
pb = pred.promiscuous_binders(binders=binders, n=n)
if len(pb) == 0:
continue
l = base.get_length(pb)
seqlen = df.pos.max()+l
#print (name,m,df.pos.max(),l,seqlen)
grps = df.groupby('allele')
if m in colors:
c=colors[m]
else:
c='blue'
leg.append(m)
order = sorted(grps.groups)
alleles.extend(order)
#for a,g in grps:
for a in order:
g = grps.groups[a]
b = binders[binders.allele==a]
b = b[b.pos.isin(pb.pos)] #only promiscuous
b.sort_values('pos',inplace=True)
pos = b['pos'].values+1 #assumes pos is zero indexed
#clrs = [scmap.to_rgba(i) for i in b[sckey]]
#for x,c in zip(pos,clrs):
for x in pos:
rect = ax.add_patch(Rectangle((x,y), l, 1, facecolor=c, edgecolor='black',
lw=1.5, alpha=0.6))
y+=1
handles.append(rect)
if len(leg) == 0:
return
ax.set_xlim(0, seqlen)
ax.set_ylim(0, len(alleles))
w=20
if seqlen>500: w=100
ax.set_xticks(np.arange(0, seqlen, w))
ax.set_ylabel('allele')
ax.set_yticks(np.arange(.5,len(alleles)+.5))
fsize = 14-1*len(alleles)/40.
ax.set_yticklabels(alleles, fontsize=fsize )
ax.grid(b=True, which='major', alpha=0.5)
ax.set_title(name, fontsize=16, loc='right')
if regions is not None:
r = regions[regions.name==name]
coords = (list(r.start),list(r.end-r.start))
coords = zip(*coords)
plot_regions(coords, ax, color='gray')
if legend == True:
ax.legend(handles, leg, bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3)
plt.tight_layout()
return ax
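# Usage sketch (matplotlib variant of the track plot): save the figure for
# one protein. Assumes `preds` holds predictors with data for protein_name.
def _example_plot_tracks(preds, protein_name):
    ax = plot_tracks(preds, name=protein_name, n=2, cutoff=.95, legend=True)
    if ax is not None:  # returns None when nothing passed the cutoff
        ax.get_figure().savefig('binder_tracks.png')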
def plot_regions(coords, ax, color='red', label='', alpha=0.6):
"""Highlight regions in a prot binder plot"""
from matplotlib.patches import Rectangle
#l = len(seqs.head(1)['key'].max())
h = ax.get_ylim()[1]
for c in coords:
x,l = c
ax.add_patch(Rectangle((x,0), l, h,
facecolor=color, lw=.8, alpha=alpha, zorder=0))
return
def draw_labels(labels, coords, ax):
"""Add labels on axis"""
bbox_args = dict(boxstyle='square',fc='whitesmoke')
from matplotlib.transforms import blended_transform_factory
tform = blended_transform_factory(ax.transData, ax.transAxes)
for text, x in zip(labels,coords):
xy = (x,-.05)
an = ax.annotate(text, xy=xy, xycoords=tform,
ha='center', va="center",
size=14,
zorder=10, textcoords='offset points',
bbox=bbox_args)
plt.subplots_adjust(bottom=0.1)
return
def plot_bars(P, name, chunks=1, how='median', cutoff=20, color='black'):
"""
Bar plots for sequence using median/mean/total scores.
Args:
P: predictor with data
name: name of protein sequence
chunks: break sequence up into 1 or more chunks
how: method to calculate score bar value
        cutoff: percentile cutoff to show peptide
"""
import seaborn as sns
df = P.data[P.data.name==name].sort_values('pos')
w = 10
l= base.get_length(df)
seqlen = df.pos.max()+l
funcs = {'median': np.median, 'mean': np.mean, 'sum': np.sum}
grps = df.groupby('pos')
key = P.scorekey
    X = grps.agg({key: funcs[how], 'peptide': base.first})
q = (1-cutoff/100.) #score quantile value
cutoff = X[key].quantile(q)
X[key][X[key]<cutoff] = np.nan
if len(X)<20:
chunks = 1
seqlist = X.peptide.apply( lambda x : x[0])
seqchunks = np.array_split(X.index, chunks)
f,axs = plt.subplots(chunks,1,figsize=(15,2+2.5*chunks))
if chunks == 1:
axs = [axs]
else:
axs = list(axs.flat)
for c in range(chunks):
#print (c)
ax = axs[c]
st = seqchunks[c][0]
end = seqchunks[c][-1]
p = X[st:end]
#p = p[p.peptide.isin(cb.peptide)]
ax.bar(p.index, p[key], width=1, color=color)
ax.set_title(str(st)+'-'+str(end), loc='right')
xseq = seqlist[st:end]
if len(xseq)<150:
fsize = 16-1*len(xseq)/20.
ax.set_xlim(st,end)
ax.set_xticks(p.index+0.5)
ax.set_xticklabels(xseq, rotation=0, fontsize=fsize)
ax.set_ylim(X[key].min(), X[key].max())
f.suptitle(name+' - '+P.name)
plt.tight_layout()
return axs
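# Usage sketch: median-score bars for one protein, split into two chunks.
# Assumes `P` is a predictor whose .data covers protein_name.
def _example_plot_bars(P, protein_name):
    axs = plot_bars(P, protein_name, chunks=2, how='median', cutoff=20)
    axs[0].get_figure().savefig('score_bars.png')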
def plot_bcell(plot,pred,height,ax=None):
"""Line plot of iedb bcell results"""
x = pred.data.Position
y = pred.data.Score
h = height
y = y+abs(min(y))
y = y*(h/max(y))+3
#plot.line(x, y, line_color="red", line_width=2, alpha=0.6,legend='bcell')
ax.plot(x,y,color='blue')
return
def plot_seqdepot(annotation, ax):
"""Plot sedepot annotations - replace with generic plot coords track"""
from matplotlib.patches import Rectangle
y=-1.5
fontsize=12
if 'signalp' in annotation:
bbox_args = dict(boxstyle='rarrow', fc='white', lw=1, alpha=0.8)
pos = annotation['signalp'].values()
print (pos)
for x in pos:
an = ax.annotate('SP', xy=(x,y), xycoords='data',
ha='left', va="center", bbox=bbox_args,
size=fontsize)
if 'tmhmm' in annotation:
vals = annotation['tmhmm']
pos = [i[0]+(i[1]-i[0])/2.0 for i in vals]
widths = [i[1]-i[0] for i in vals]
bbox_args = dict(boxstyle='round', fc='deepskyblue', lw=1, alpha=0.8)
for x,w in zip(pos,widths):
an = ax.annotate('TMHMM', xy=(x,y), xycoords='data',
ha='left', va="center", bbox=bbox_args,
size=fontsize)
if 'pfam27' in annotation:
vals = annotation['pfam27']
text = [i[0] for i in vals]
pos = [i[1]+(i[2]-i[1])/2.0 for i in vals]
widths = [i[2]-i[1] for i in vals]
#print (pos,widths,text)
bbox_args = dict(boxstyle='round', fc='white', lw=1, alpha=0.8)
for x,w,t in zip(pos,widths,text):
an = ax.annotate(t, xy=(x,y), xycoords='data',
ha='left', va="center", bbox=bbox_args,
size=fontsize)
ax.set_ylim(y-1, ax.get_ylim()[1])
return
def plot_multiple(preds, names, kind='tracks', regions=None, genome=None, **kwargs):
"""Plot results for multiple proteins"""
for prot in names:
if kind == 'tracks':
ax = plot_tracks(preds,name=prot,**kwargs)
elif kind == 'bar':
axs = plot_bars(preds[0],name=prot)
ax = axs[0]
if regions is not None:
r = regions[regions.name==prot]
print (r)
#print genome[genome.locus_tag==prot]
coords = (list(r.start),list(r.end-r.start))
coords = zip(*coords)
plot_regions(coords, ax, color='gray')
#labels = list(r.peptide)
#plotting.draw_labels(labels, coords, ax)
if genome is not None:
p = genome[genome['locus_tag']==prot]
seq = p.translation.iloc[0]
from . import analysis
sd = analysis.get_seqdepot(seq)['t']
plot_seqdepot(sd, ax)
plt.tight_layout()
plt.show()
return
def plot_binder_map(P, name, values='rank', cutoff=20, chunks=1, cmap=None):
"""
Plot heatmap of binders above a cutoff by rank or score.
Args:
P: predictor object with data
name: name of protein to plot
values: data column to use for plot data, 'score' or 'rank'
cutoff: cutoff if using rank as values
chunks: number of plots to split the sequence into
"""
import pylab as plt
import seaborn as sns
df = P.data[P.data.name==name].sort_values('pos')
w = 10
l= base.get_length(df)
seqlen = df.pos.max()+l
f,axs = plt.subplots(chunks,1,figsize=(15,3+2.5*chunks))
if chunks == 1:
axs = [axs]
else:
axs = list(axs.flat)
if values == 'score':
values = P.scorekey
if cmap == None: cmap='RdBu_r'
X = df.pivot_table(index='allele', columns='pos', values=values)
if values == P.scorekey:
#normalise score across alleles for clarity
zscore = lambda x: (x - x.mean()) / x.std()
X = X.apply(zscore, 1)
if values == 'rank':
X[X > cutoff] = 0
if cmap == None: cmap='Blues'
s = df.drop_duplicates(['peptide','pos'])
seqlist = s.set_index('pos').peptide.apply( lambda x : x[0])
#print seqlist
seqchunks = np.array_split(X.columns, chunks)
for c in range(chunks):
ax = axs[c]
p = X[seqchunks[c]]
#plot heatmap
vmin=min(X.min()); vmax=max(X.max())
center = vmin+(vmax-vmin)/2
sns.heatmap(p, cmap=cmap, cbar_kws={"shrink": .5},
vmin=vmin, vmax=vmax, #center=center,
ax=ax, xticklabels=20)
#show sequence on x-axis
st = seqchunks[c][0]
end = seqchunks[c][-1]
xseq = seqlist[st:end]
ax.set_title(str(st)+'-'+str(end), loc='right')
ax.spines['bottom'].set_visible(True)
if len(seqchunks[c])<150:
fsize = 16-1*len(seqchunks[c])/20.
ax.set_xticks(np.arange(0,len(xseq))+0.5)
ax.set_xticklabels(xseq, rotation=0, fontsize=fsize)
f.suptitle(name+' - '+P.name)
plt.tight_layout()
return ax
def binders_to_coords(df):
"""Convert binder results to dict of coords for plotting"""
coords = {}
    if 'start' not in df.columns:
df=base.get_coords(df)
if 'start' in df.columns:
for i,g in df.groupby('name'):
l = g.end-g.start
coords[i] = zip(g.start,l)
return coords
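# Usage sketch: binder results with name/start/end columns become per-protein
# (start, length) pairs suitable for plot_overview below. Values are made up.
def _example_binders_to_coords():
    df = pd.DataFrame({'name': ['protA', 'protA', 'protB'],
                       'start': [5, 40, 12], 'end': [14, 49, 21]})
    coords = binders_to_coords(df)
    # materialize the zip objects, e.g. {'protA': [(5, 9), (40, 9)], ...}
    return {k: list(v) for k, v in coords.items()}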
def plot_overview(genome, coords=None, cols=2, colormap='Paired',
legend=True, figsize=None):
"""
Plot regions of interest in a group of protein sequences. Useful for
seeing how your binders/epitopes are distributed in a small genome or subset of genes.
Args:
genome: dataframe with protein sequences
coords: a list/dict of tuple lists of the form {protein name: [(start,length)..]}
cols: number of columns for plot, integer
"""
import pylab as plt
if type(coords) is list:
coords = { i:coords[i] for i in range(len(coords)) }
legend=False
import matplotlib as mpl
import seaborn as sns
#sns.reset_orig()
cmap = mpl.cm.get_cmap(colormap)
t = len(coords)
colors = [cmap(float(i)/t) for i in range(t)]
from matplotlib.patches import Rectangle
names = [coords[c].keys() for c in coords][0]
df = genome[genome.locus_tag.isin(names)]
rows = int(np.ceil(len(names)/float(cols)))
if figsize == None:
h = round(len(names)*.2+10./cols)
figsize = (14,h)
f,axs=plt.subplots(rows,cols,figsize=figsize)
grid=axs.flat
rects = {}
i=0
for idx,prot in df.iterrows():
ax=grid[i]
protname = prot.locus_tag
seq = prot.translation
if 'description' in prot:
title = prot.description
else:
title = protname
y=0
for label in coords:
c = coords[label]
if not protname in c:
continue
vals = c[protname]
#print vals
for v in vals:
x,l = v[0],v[1]
rect = ax.add_patch(Rectangle((x,y), l, .9,
facecolor=colors[y],
lw=1.2, alpha=0.8))
if len(v)>2:
s = v[2]
bbox_args = dict(fc=colors[y], lw=1.2, alpha=0.8)
ax.annotate(s, (x+l/2, y),
fontsize=12, ha='center', va='bottom')
if not label in rects:
rects[label] = rect
y+=1
i+=1
slen = len(seq)
w = round(float(slen)/20.)
w = math.ceil(w/20)*20
ax.set_xlim(0, slen)
ax.set_ylim(0, t)
ax.set_xticks(np.arange(0, slen, w))
ax.set_yticks([])
ax.set_title(title, fontsize=16, loc='right')
    # remove the unused trailing axes when the grid isn't completely filled
    if i % cols != 0 and cols > 1:
try:
f.delaxes(grid[i])
except:
pass
if legend == True:
f.legend(rects.values(), rects.keys(), loc=4)
plt.tight_layout()
return
def seqdepot_to_coords(sd, key='pfam27'):
"""
Convert seqdepot annotations to coords for plotting
"""
coords=[]
if len(sd['t'])==0 or not key in sd['t']:
return []
x = sd['t'][key]
#print x
if key in ['pfam27','pfam28']:
coords = [(i[1],i[2]-i[1],i[0]) for i in x]
elif key in ['gene3d','prints']:
coords = [(i[2],i[3]-i[2],i[1]) for i in x]
elif key == 'tmhmm':
coords = [(i[0],i[1]-i[0]) for i in x]
elif key == 'signalp':
x = x.items()
coords = [(i[1],10,'SP') for i in x]
return coords
def get_seqdepot_annotation(genome, key='pfam27'):
"""
Get seqdepot annotations for a set of proteins in dataframe.
"""
from . import seqdepot
annot={}
for i,row in genome.iterrows():
n = row.locus_tag
seq = row.translation
#print n,seq
sd = seqdepot.new()
aseqid = sd.aseqIdFromSequence(seq)
result = sd.findOne(aseqid)
#for x in result['t']:
# print x, result['t'][x]
x = seqdepot_to_coords(result, key)
annot[n] = x
return annot
| apache-2.0 |
wrightni/OSSP | training_gui.py | 1 | 40462 | #title: Training Set Creation for Random Forest Classification
#author: Nick Wright
#Inspired by: Justin Chen
#purpose: Creates a GUI for a user to identify watershed superpixels of an image as
# melt ponds, sea ice, or open water to use as a training data set for a
# Random Forest Classification method.
# Python 3:
import tkinter as tk
# Python 2:
# import Tkinter as tk
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import h5py
import os
import argparse
from ctypes import *
import gdal
from sklearn.ensemble import RandomForestClassifier
from select import select
import sys
import preprocess as pp
from segment import segment_image
from lib import utils
from lib import attribute_calculations as attr_calc
class PrintColor:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
class Buttons(tk.Frame):
# Defines the properties of all the controller buttons to be used by the GUI.
def __init__(self, parent):
tk.Frame.__init__(self, parent)
prev_btn = tk.Button(self, text="Previous Segment", width=16, height=2,
command=lambda: parent.event_manager.previous_segment())
prev_btn.grid(column=0, row=0, pady=(0,20))
water_btn = tk.Button(self, text="Open Water", width=16, height=2, highlightbackground='#000000',
command=lambda: parent.event_manager.classify("water"))
water_btn.grid(column=0, row=1, pady=1)
melt_btn = tk.Button(self, text="Melt Pond", width=16, height=2, highlightbackground='#4C678C',
command=lambda: parent.event_manager.classify("melt"))
melt_btn.grid(column=0, row=2, pady=1)
gray_btn = tk.Button(self, text="Dark and Thin Ice", width=16, height=2, highlightbackground='#D2D3D5',
command=lambda: parent.event_manager.classify("gray"))
gray_btn.grid(column=0, row=3, pady=1)
snow_btn = tk.Button(self, text="Snow or Ice", width=16, height=2,
command=lambda: parent.event_manager.classify("snow"))
snow_btn.grid(column=0, row=4, pady=1)
shadow_btn = tk.Button(self, text="Shadow", width=16, height=2, highlightbackground='#FF9200',
command=lambda: parent.event_manager.classify("shadow"))
shadow_btn.grid(column=0, row=5, pady=1)
unknown_btn = tk.Button(self, text="Unknown / Mixed", width=16, height=2,
command=lambda: parent.event_manager.classify("unknown"))
unknown_btn.grid(column=0, row=6, pady=1)
auto_btn = tk.Button(self, text="Autorun", width=16, height=2,
command=lambda: parent.event_manager.autorun())
auto_btn.grid(column=0, row=7, pady=(20,0))
next_btn = tk.Button(self, text="Next Image", width=16, height=2,
command=lambda: parent.event_manager.next_image())
next_btn.grid(column=0, row=8, pady=1)
quit_btn = tk.Button(self, text="Save and Quit", width=16, height=2,
command=lambda: parent.event_manager.quit_event())
quit_btn.grid(column=0, row=9, pady=1)
load_first_btn = tk.Button(self, text="Initialize Image", width=16, height=2,
command=lambda: parent.event_manager.initialize_image())
load_first_btn.grid(column=0, row=10, pady=(40,0))
class ProgressBar(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
self.total_counter = tk.StringVar()
self.total_counter.set("Total Progress: {}".format(0))
self.image_tracker = tk.StringVar()
self.image_tracker.set("")
total_text = tk.Label(self, textvariable=self.total_counter)
total_text.grid(column=0, row=0)
image_text = tk.Label(self, textvariable=self.image_tracker)
image_text.grid(column=0, row=1)
def update_progress(self):
self.total_counter.set("Total Progress: {}".format(self.parent.data.get_num_labels()))
self.image_tracker.set("Image {} of {}".format(self.parent.data.im_index + 1,
len(self.parent.data.available_images)))
class ImageDisplay(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Initialize class variables
# Populated in initialize_image method:
self.display_image = None
self.disp_xdim, self.disp_ydim, = 0, 0
# Populated in update_images:
self.zoom_win_x, self.zoom_win_y = 0, 0
# Creating the canvas where the images will be
self.fig = plt.figure(figsize=[10, 10])
self.fig.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.99, wspace=0.01, hspace=0.01)
canvas = FigureCanvasTkAgg(self.fig, self)
canvas.draw()
# toolbar = NavigationToolbar2TkAgg(canvas, frame)
canvas.get_tk_widget().grid(column=0, row=0)
# toolbar.pack(in_=frame, side='top')
self.cid = self.fig.canvas.mpl_connect('button_press_event', parent.event_manager.onclick)
# Create a placeholder while image data is loading
self.initial_display()
def initialize_image(self):
# Creates a local composite of the original image data for display
if self.parent.data.im_type == 'wv02_ms':
self.display_image = utils.create_composite([self.parent.data.original_image[4, :, :],
self.parent.data.original_image[2, :, :],
self.parent.data.original_image[1, :, :]],
dtype=np.uint8)
elif self.parent.data.im_type == 'pan':
self.display_image = utils.create_composite([self.parent.data.original_image,
self.parent.data.original_image,
self.parent.data.original_image],
dtype=np.uint8)
elif self.parent.data.im_type == 'srgb':
self.display_image = utils.create_composite([self.parent.data.original_image[0, :, :],
self.parent.data.original_image[1, :, :],
self.parent.data.original_image[2, :, :]],
dtype=np.uint8)
self.disp_xdim, self.disp_ydim = np.shape(self.display_image)[0:2]
def loading_display(self):
plt.clf()
loading_text = "Images are loading, please wait... "
# Creates a image placeholder while the data is being loaded.
ax = self.fig.add_subplot(1, 1, 1, adjustable='datalim', frame_on=False)
ax.text(0.5, 0.5, loading_text, horizontalalignment='center', verticalalignment='center')
ax.axis('off')
# Updating the plots
self.fig.canvas.draw()
def initial_display(self):
plt.clf()
welcome_text = "No images have been loaded. Press <Initialize Image> to begin."
tds_text = "Training data file: \n {}".format(self.parent.data.tds_filename)
image_text = "Images found: \n"
if len(self.parent.data.available_images) == 0:
image_text += 'None'
else:
for im in self.parent.data.available_images:
image_text += im + '\n'
# Creates a image placeholder while the data is being loaded.
ax = self.fig.add_subplot(2, 1, 1, adjustable='datalim', frame_on=False)
ax.text(0.5, 0.3, welcome_text, horizontalalignment='center', verticalalignment='bottom', weight='bold')
ax.axis('off')
ax2 = self.fig.add_subplot(2, 1, 2, adjustable='datalim', frame_on=False)
ax2.text(0.5, 1, tds_text, horizontalalignment='center', verticalalignment='center')
ax2.text(0.5, .9, image_text, horizontalalignment='center', verticalalignment='top')
ax2.axis('off')
# Updating the plots
self.fig.canvas.draw()
def update_images(self, segment_id):
# Clear the existing display
plt.clf()
current_seg = self.parent.data.segmented_image == segment_id # array of 0 or 1 where 1 = current segment
segment_pos = np.nonzero(current_seg) # returns the array position of the segment
zoom_size = 100
x_min = np.amin(segment_pos[0]) - zoom_size
x_max = np.amax(segment_pos[0]) + zoom_size
y_min = np.amin(segment_pos[1]) - zoom_size
y_max = np.amax(segment_pos[1]) + zoom_size
# Store the zoom window corner coordinates for reference in onclick()
# xMin and yMin are defined backwards
self.zoom_win_x = y_min
self.zoom_win_y = x_min
if x_min < 0:
x_min = 0
if x_max >= self.disp_xdim:
x_max = self.disp_xdim - 1
if y_min < 0:
y_min = 0
if y_max >= self.disp_ydim:
y_max = self.disp_ydim - 1
# Image 2 (Zoomed in image, no highlighted segment)
cropped_image = self.display_image[x_min:x_max, y_min:y_max]
# Image 3 (Zoomed in image, with segment highlight)
color_image = np.copy(self.display_image)
color_image[:, :, 0][current_seg] = 255
color_image[:, :, 2][current_seg] = 0
color_image = color_image[x_min:x_max, y_min:y_max]
# Text instructions
instructions = '''
Open Water: Surface areas that had zero ice cover
as well as those covered by an unconsolidated frazil
or grease ice. \n
Melt Pond: Surface areas with water covering ice.
Areas where meltwater is trapped in isolated patches
atop ice, and the optically similar submerged ice
near the edge of a floe. \n
Dark Ice:
Freezing season: Surfaces of thin ice that are
not snow covered, including nilas and young ice.
Melt season: ice covered by saturated slush,
but not completely submerged in water \n
Snow/Ice: Optically thick ice, and ice with a snow cover. \n
Shadow: Surfaces that are covered by a dark shadow.
\n
'''
# Plotting onto the GUI
ax = self.fig.add_subplot(2, 2, 1)
ax.imshow(color_image, interpolation='None', vmin=0, vmax=255)
ax.tick_params(axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
ax.set_label('ax1')
ax = self.fig.add_subplot(2, 2, 2)
ax.imshow(cropped_image, interpolation='None', vmin=0, vmax=255)
ax.tick_params(axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
ax.set_label('ax2')
ax = self.fig.add_subplot(2, 2, 3)
ax.imshow(self.display_image, interpolation='None', vmin=0, vmax=255)
ax.axvspan(y_min,
y_max,
1. - float(x_max) / self.disp_xdim,
1. - float(x_min) / self.disp_xdim,
color='red',
alpha=0.3)
ax.set_xlim([0, np.shape(self.display_image)[1]])
ax.tick_params(axis='both', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False,
right=False,
labelleft=False,
labelbottom=False)
ax.set_label('ax3')
ax = self.fig.add_subplot(2, 2, 4, adjustable='datalim', frame_on=False)
ax.text(0.5, 0.5, instructions, horizontalalignment='center', verticalalignment='center')
ax.axis('off')
# Updating the plots
self.fig.canvas.draw()
class DataManager:
def __init__(self, available_images, tds_filename, username, im_type):
# Image and segment data (populated in load_image())
self.original_image = None
self.segmented_image = None
# Variable Values (populated in load_training_data())
self.label_vector = []
self.segment_list = []
self.feature_matrix = []
self.tracker = 0 # Number of segment sets added from the current image
self.im_index = 0 # Index for progressing through available images
# Global Static Values
self.tds_filename = tds_filename
self.username = username
self.im_type = im_type
self.available_images = available_images
# Image Static Value (populated in load_image())
self.wb_ref = None
self.br_ref = None
self.im_date = None
self.im_name = None
def load_next_image(self):
# Increment the image index
self.im_index += 1
# Loop im_index based on the available number of images
self.im_index = self.im_index % len(self.available_images)
# Load the new data
self._load_image()
def load_previous_image(self):
# If an image has already been loaded, and there is no previous data,
# prevent the user from using this button.
if self.get_num_labels() == 0 and self.im_name is not None:
return
# If labels exist find the correct image to load
if self.get_num_labels() != 0:
# If this does not find a match, im_index will default to its current value
for i in range(len(self.available_images)):
if self.get_current_segment()[0] in self.available_images[i]:
self.im_index = i
self._load_image()
def _load_image(self):
# Loads the optical and segmented image data from disk. Should only be called from
# load_next_image method.
full_image_name = self.available_images[self.im_index]
self.im_name = os.path.splitext(os.path.split(full_image_name)[1])[0]
src_ds = gdal.Open(full_image_name, gdal.GA_ReadOnly)
# Read the image date from the metadata
metadata = src_ds.GetMetadata()
self.im_date = pp.parse_metadata(metadata, self.im_type)
# Determine the datatype
src_dtype = gdal.GetDataTypeSize(src_ds.GetRasterBand(1).DataType)
# Calculate the reference points from the image histogram
lower, upper, wb_ref, br_ref = pp.histogram_threshold(src_ds, src_dtype)
self.wb_ref = np.array(wb_ref, dtype=c_uint8)
self.br_ref = np.array(br_ref, dtype=c_uint8)
# Load the image data
image_data = src_ds.ReadAsArray()
# Close the GDAL dataset
src_ds = None
# Rescale the input dataset using a histogram stretch
image_data = pp.rescale_band(image_data, lower, upper)
# Apply a white balance to the image
        image_data = pp.white_balance(image_data, self.wb_ref.astype(np.float64), float(np.amax(self.wb_ref)))
# Convert the input data to c_uint8
self.original_image = np.ndarray.astype(image_data, c_uint8)
print("Creating segments on provided image...")
watershed_image = segment_image(image_data, image_type=self.im_type)
        # Convert the segmented image to c_uint32 datatype. This is needed for the
        # Cython methods that calculate attributes of segments.
        self.segmented_image = np.ndarray.astype(watershed_image, c_uint32)
# Clear these from memory explicitly
image_data = None
watershed_image = None
def load_training_data(self):
try:
with h5py.File(self.tds_filename, 'r') as data_file:
# Load the existing feature matrix and segment list if they exist,
# otherwise initialize an empty array for these lists.
if 'feature_matrix' in list(data_file.keys()):
self.feature_matrix = data_file['feature_matrix'][:].tolist()
else:
self.feature_matrix = []
if 'segment_list' in list(data_file.keys()):
# For loading files created in py2
self.segment_list = [[name[0].decode(), name[1].decode()] for name in data_file['segment_list']]
else:
self.segment_list = []
# Determine if this user has data already stored in the training set. If so,
# use the existing classifications. If not, start from the beginning.
# must use .tolist() because datasets in h5py files are numpy arrays, and we want
# these as python lists.
# [y1...yn] column vector where n : number of classified segments, y = classification
if self.username in list(data_file.keys()):
self.label_vector = data_file[self.username][:].tolist()
else:
self.label_vector = []
# If the file does not exist, create empty values
except OSError:
self.feature_matrix = []
self.segment_list = []
self.label_vector = []
def get_num_labels(self):
return len(self.label_vector)
def append_label(self, label):
self.tracker += 1
self.label_vector.append(label)
# Removes the last entry from label_vector
def remove_last_label(self):
self.label_vector.pop()
self.tracker -= 1
def get_num_segments(self):
return len(self.segment_list)
# The current segment is the next one that doesn't have an associated label
def get_current_segment(self):
return self.segment_list[len(self.label_vector)]
def add_single_segment(self, new_segment):
self.segment_list.append(new_segment)
# Trims all unclassified segments from segment_list by trimming it to
# the length of label_vector
def trim_segment_list(self):
self.segment_list = self.segment_list[:len(self.label_vector)]
# Add 10 randomly selected segments to the list of ones to classify
def add_segments(self):
segments_to_add = []
a = 0
# Select random x,y coordinates from the input image, and pick the segment where the random
# pixel lands. This makes the selected segments representative of the average surface
# distribution within the image. This still wont work if the image has a minority of any
# particular surface type.
while len(segments_to_add)<10:
a += 1
z, x, y = np.shape(self.original_image)
i = np.random.randint(x)
j = np.random.randint(y)
# Find the segment label at the random pixel
segment_id = self.segmented_image[i][j]
sp_size = np.sum(self.segmented_image == segment_id)
if sp_size >= 20:
# Check for a duplicate segment already in the tds
new_segment = [self.im_name,
"{}".format(segment_id)]
if new_segment not in self.segment_list and new_segment not in segments_to_add:
segments_to_add.append(new_segment)
print(("Attempts: {}".format(a)))
self.segment_list += segments_to_add
def compute_attributes(self, segment_id):
# Create the a attribute list for the labeled segment
feature_array = calc_attributes(self.original_image, self.segmented_image,
self.wb_ref, self.br_ref, self.im_date, segment_id, self.im_type)
# attribute_calculations returns a 2d array, but we only want the 1d list of features.
feature_array = feature_array[0]
return feature_array
def append_features(self, feature_array):
# If there are fewer features than labels, assume the new one should be appended
# to the end
if len(self.feature_matrix) == len(self.label_vector) - 1:
#Adding all of the features found for this watershed to the main matrix
self.feature_matrix.append(feature_array)
# Otherwise replace the existing features with the newly calculated ones.
# (Maybe just skip this in the future and assume they were calculated correctly before?
else:
# old_feature_array = self.feature_matrix[len(self.label_vector) - 1]
print("Recalculated Feature.")
# print(("Old: {} {}".format(old_feature_array[0], old_feature_array[1])))
# print(("New: {} {}".format(feature_array[0], feature_array[1])))
self.feature_matrix[len(self.label_vector) - 1] = feature_array
class EventManager:
def __init__(self, parent):
self.parent = parent
self.is_active = False # Prevents events from happening while images are loading
def activate(self):
self.is_active = True
def deactivate(self):
self.is_active = False
def next_segment(self):
if not self.is_active:
return
# If all of the segments in the predefined list have been classified already,
# present the user with a random new segment.
if self.parent.data.get_num_labels() == self.parent.data.get_num_segments():
# I think if segment_list == [] is covered by the above..?
self.parent.data.add_segments()
# retrain the random forest model if the live predictor is active
if self.parent.live_predictor.is_active():
self.parent.live_predictor.retrain_model(self.parent.data.feature_matrix,
self.parent.data.label_vector)
# The current segment is the next one that doesn't have an associated label
current_segment = self.parent.data.get_current_segment()
segment_id = int(current_segment[1])
# Redraw the display with the new segment id
self.parent.image_display.update_images(segment_id)
def previous_segment(self):
if not self.is_active:
return
# Make sure this function returns null if there is no previous sp to go back to
if self.parent.data.get_num_labels() == 0:
return
else:
# Delete the last label in the list, then get the 'new' current segment
self.parent.data.remove_last_label()
current_segment = self.parent.data.get_current_segment()
self.parent.progress_bar.update_progress()
if current_segment[0] != self.parent.data.im_name:
self.previous_image()
return
segment_id = int(current_segment[1])
# Redraw the display with the new segment id
self.parent.image_display.update_images(segment_id)
def onclick(self, event):
if not self.is_active:
return
if event.inaxes is not None:
axes_properties = event.inaxes.properties()
segment_id = -1
x, y = 0, 0
# If the mouse click was in the overview image
if axes_properties['label'] == 'ax3':
x = int(event.xdata)
y = int(event.ydata)
segment_id = self.parent.data.segmented_image[y, x]
# Either of the top zoomed windows
if axes_properties['label'] == 'ax1' or axes_properties['label'] == 'ax2':
win_x = int(event.xdata)
win_y = int(event.ydata)
x = self.parent.image_display.zoom_win_x + win_x
y = self.parent.image_display.zoom_win_y + win_y
segment_id = self.parent.data.segmented_image[y, x]
# If user clicked on a valid location, add the segment that was clicked on to segment_list,
# then update the image render.
if segment_id >= 0:
print(("You clicked at ({}, {}) in {}".format(x, y, axes_properties['label'])))
print(("Segment id: {}".format(segment_id)))
new_segment = [self.parent.data.im_name,
"{}".format(segment_id)]
if new_segment not in self.parent.data.segment_list:
# Trim all unclassified segments
self.parent.data.trim_segment_list()
# Add the selected one as the next segment
self.parent.data.add_single_segment(new_segment)
# Get the new current segment and redraw display
segment_id = int(self.parent.data.get_current_segment()[1])
self.parent.image_display.update_images(segment_id)
else:
print("This segment has already been labeled")
def classify(self, key_press):
if not self.is_active:
return
# Assigning the highlighted segment a classification
segment_id = int(self.parent.data.get_current_segment()[1])
print("Segment ID: {}".format(segment_id))
# Note that we classified one more image
if key_press == "snow":
self.parent.data.append_label(1)
elif key_press == "gray":
self.parent.data.append_label(2)
elif key_press == "melt":
self.parent.data.append_label(3)
elif key_press == "water":
self.parent.data.append_label(4)
elif key_press == "shadow":
self.parent.data.append_label(5)
elif key_press == "unknown":
self.parent.data.append_label(6)
# Calculate the attributes for the current segment
feature_array = self.parent.data.compute_attributes(segment_id)
self.parent.data.append_features(feature_array)
# Printing some useful statistics
print("Assigned value: {} ({})".format(str(self.parent.data.label_vector[-1]), key_press))
if self.parent.live_predictor.is_active():
self.parent.live_predictor.print_prediction(feature_array)
print(("~"*80))
self.parent.progress_bar.update_progress()
self.next_segment()
# if len(self.feature_matrix) == len(self.label_vector)-1:
# #Adding all of the features found for this watershed to the main matrix
# self.feature_matrix.append(feature_array)
# else:
# old_feature_array = self.feature_matrix[len(self.label_vector)-1]
# print("Recalculated Feature.")
# print(("Old: {} {}".format(old_feature_array[0],old_feature_array[1])))
# print(("New: {} {}".format(feature_array[0], feature_array[1])))
# self.feature_matrix[len(self.label_vector)-1] = feature_array
def autorun(self):
if not self.is_active:
return
# In the future make this function a standalone window (instead of terminal output)??
# Prevent the user from accessing this if the predictor is inactive
if not self.parent.live_predictor.is_active():
print("Autorun functionality disabled")
return
# segment_id = int(self.segment_list[len(self.label_vector):][0][1])
segment_id = int(self.parent.data.get_current_segment()[1])
# Create the a attribute list for the labeled segment
feature_array = self.parent.data.compute_attributes(segment_id)
# feature_array = calc_attributes(self.original_image, self.secondary_image,
# self.wb_ref, self.br_ref, self.im_date, segment_id, self.im_type)
print("~" * 80)
# This both prints the results of the prediction for the user to check, and also returns the
# predicted values for use here.
pred, proba = self.parent.live_predictor.print_prediction(feature_array)
        if 0.90 < proba < 0.96:
            timeout = 4  # 6
            print(PrintColor.BOLD + "Label if incorrect:" + PrintColor.END)
        elif proba < .9:
            timeout = 10  # 12
            print(PrintColor.BOLD + PrintColor.RED + "Label if incorrect:" + PrintColor.END)
        else:
            timeout = 0.5
# Prompt the user to change the classification if they dont agree with the
# predicted one. If no input is recieved, the predicted one is assumed to be correct.
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline()
try:
s = int(s)
except ValueError:
print("Ending autorun.")
return
if 0 <= s < 6:
label = s
print(("Assigning label {} instead.".format(label)))
else:
print("Ending autorun.")
return
else:
label = pred
print(("No input. Assigning label: {}".format(label)))
self.parent.data.append_label(label)
self.parent.data.append_features(feature_array)
self.parent.progress_bar.update_progress()
self.next_segment()
self.parent.after(100, self.autorun)
def save(self):
if self.parent.data.label_vector == []:
return
print("Saving...")
username = self.parent.data.username
prev_names = []
prev_data = []
try:
with h5py.File(self.parent.data.tds_filename, 'r') as infile:
# Compiles all of the user data that was in the previous training validation file so that
# it can be added to the new file as well. (Because erasing and recreating a .h5 is easier
# than altering an existing one)
for prev_user in list(infile.keys()):
if prev_user != 'feature_matrix' and prev_user != 'segment_list' and prev_user != username:
prev_names.append(prev_user)
prev_data.append(infile[prev_user][:])
infile.close()
except OSError:
pass
# overwrite the h5 dataset with the updated information
with h5py.File(self.parent.data.tds_filename, 'w') as outfile:
outfile.create_dataset('feature_matrix', data=self.parent.data.feature_matrix)
outfile.create_dataset(username, data=self.parent.data.label_vector)
segment_list = np.array(self.parent.data.segment_list, dtype=np.string_)
outfile.create_dataset('segment_list', data=segment_list)
for i in range(len(prev_names)):
outfile.create_dataset(prev_names[i], data=prev_data[i])
print("Done.")
def next_image(self):
if not self.is_active:
return
self.deactivate()
# Trim the unlabeled segments from segment list
self.parent.data.trim_segment_list()
# Save the existing data
self.save()
# Set the display to the loading screen and let it draw before the blocking load
self.parent.image_display.loading_display()
self.parent.update_idletasks()
# Load the next image data
self.parent.data.load_next_image()
# Add the new data to the display class
self.parent.image_display.initialize_image()
# Update the display screen
# Go to the next segment (which will add additional segments to the queue and update the display)
self.parent.progress_bar.update_progress()
self.activate()
self.next_segment()
def previous_image(self):
self.deactivate()
# Set the display to the loading screen and let it draw before the blocking load
self.parent.image_display.loading_display()
self.parent.update_idletasks()
# Load the previous image data
self.parent.data.load_previous_image()
# Add the new data to the display class
self.parent.image_display.initialize_image()
# Update the display screen
# Go to the next segment (which will add additional segments to the queue and update the display)
self.parent.progress_bar.update_progress()
self.activate()
self.next_segment()
def initialize_image(self):
if len(self.parent.data.available_images) == 0:
print("No images to load!")
return
# Check to make sure no data has been loaded
if self.parent.data.im_name is not None:
return
# Previous image does all the loading work we need for the first image
self.previous_image()
def quit_event(self):
# Exits the GUI, automatically saves progress
self.save()
self.parent.exit_gui()
class LivePredictor:
def __init__(self, active_state):
self.active_state = active_state
self.is_trained = False
self.rfc = RandomForestClassifier(n_estimators=100)
# True if LivePredictor is running, false otherwise
def is_active(self):
return self.active_state
def retrain_model(self, feature_matrix, label_vector):
if len(label_vector) >= 10:
self.rfc.fit(feature_matrix[:len(label_vector)], label_vector)
self.is_trained = True
def print_prediction(self, feature_array):
if self.is_trained:
pred = self.rfc.predict(feature_array.reshape(1, -1))[0]
pred_prob = self.rfc.predict_proba(feature_array.reshape(1, -1))[0]
pred_prob = np.amax(pred_prob)
print(("Predicted value: {}{}{} ({})".format(PrintColor.PURPLE, pred, PrintColor.END, pred_prob)))
return pred, pred_prob
else:
return 0, 0
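# Note: retrain_model() only fits the classifier once at least 10 labels
# exist, so until then print_prediction() prints nothing and returns (0, 0).
# In autorun() this means pred == 0 and proba == 0, so the low-confidence
# branch is taken and label 0 is assigned if the user gives no input.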
class TrainingWindow(tk.Frame):
def __init__(self, parent, img_list, tds_filename, username, im_type, activate_autorun=False):
tk.Frame.__init__(self, parent)
self.parent = parent
self.parent.title("Training GUI")
# Create the controlling buttons and place them on the right side.
self.buttons = Buttons(self)
self.buttons.grid(column=1, row=1, sticky="N")
# Manager for all the GUI events (e.g. button presses)
self.event_manager = EventManager(self)
# Data manager object
self.data = DataManager(img_list, tds_filename, username, im_type)
self.data.load_training_data()
# Create the image display window
self.image_display = ImageDisplay(self)
self.image_display.grid(column=0, row=0, rowspan=2)
self.progress_bar = ProgressBar(self)
self.progress_bar.grid(column=1, row=0)
self.progress_bar.update_progress()
# Object for creating on-the-fly predictions and managing the autorun method
self.live_predictor = LivePredictor(activate_autorun)
# Define keybindings
self.parent.bind('1', lambda e: self.event_manager.classify("snow"))
self.parent.bind('2', lambda e: self.event_manager.classify("gray"))
self.parent.bind('3', lambda e: self.event_manager.classify("melt"))
self.parent.bind('4', lambda e: self.event_manager.classify("water"))
self.parent.bind('5', lambda e: self.event_manager.classify("shadow"))
self.parent.bind('<Tab>', lambda e: self.event_manager.classify("unknown"))
self.parent.bind('<BackSpace>', lambda e: self.event_manager.previous_segment())
def exit_gui(self):
self.parent.quit()
self.parent.destroy()
def calc_attributes(original_image, secondary_image,
wb_ref, br_ref, im_date, segment_id, im_type):
feature_array = []
if im_type == 'pan':
feature_array = attr_calc.analyze_pan_image(original_image,
secondary_image,
im_date,
segment_id=segment_id)
elif im_type == 'srgb':
feature_array = attr_calc.analyze_srgb_image(original_image,
secondary_image,
segment_id=segment_id)
elif im_type == 'wv02_ms':
feature_array = attr_calc.analyze_ms_image(original_image,
secondary_image,
wb_ref,
br_ref,
segment_id=segment_id)
return feature_array
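# Note: if im_type matches none of the handled types, feature_array is
# returned as the empty list, so callers should only pass 'pan', 'srgb',
# or 'wv02_ms'.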
# Returns all of the unique images in segment_list
def get_required_images(segment_list):
image_list = []
for seg_id in segment_list:
if seg_id[0] not in image_list:
image_list.append(seg_id[0])
return image_list
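# Unlike the set()-based deduplication in scrape_dir() below, this loop
# preserves the order in which images first appear in segment_list.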
def validate_tds_file(tds_filename, input_dir, image_type):
# Set the default tds filename if this was not entered
if tds_filename is None:
tds_filename = os.path.join(input_dir, image_type + "_training_data.h5")
elif os.path.isfile(tds_filename):
# If a real file was given, try opening it.
try:
data_file = h5py.File(tds_filename, 'r')
data_file.close()
except OSError:
print("Invalid data file.")
quit()
return tds_filename
# Finds all the unique images from the given directory
def scrape_dir(src_dir):
image_list = []
for ext in utils.valid_extensions:
raw_list = utils.get_image_paths(src_dir, keyword=ext)
for raw_im in raw_list:
image_list.append(raw_im)
# Save only the unique entries
image_list = list(set(image_list))
utils.remove_hidden(image_list)
return image_list
if __name__ == "__main__":
#### Set Up Arguments
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="folder containing training images")
parser.add_argument("image_type", type=str, choices=['srgb','wv02_ms','pan'],
help="image type: 'srgb', 'wv02_ms', 'pan'")
parser.add_argument("--tds_file", type=str, default=None,
help='''Existing training dataset file. Will create a new one with this name if none exists.
default: <image_type>_training_data.h5''')
parser.add_argument("--username", type=str, default=None,
help='''username to associate with the training set.
default: image_type''')
parser.add_argument("-a", "--enable_autorun", action="store_true",
help='''Enables the use of the autorun function.''')
# Parse Arguments
args = parser.parse_args()
input_dir = os.path.abspath(args.input)
image_type = args.image_type
autorun_flag = args.enable_autorun
# Add the images in the provided folder to the image list
img_list = scrape_dir(input_dir)
tds_file = validate_tds_file(args.tds_file, input_dir, image_type)
if args.username is None:
user_name = image_type
else:
user_name = args.username
root = tk.Tk()
TrainingWindow(root, img_list, tds_file, user_name, image_type,
activate_autorun=autorun_flag).pack(side='top', fill='both', expand=True)
root.mainloop()
| mit |
HolgerPeters/scikit-learn | sklearn/metrics/tests/test_ranking.py | 46 | 41270 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
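# Note: ties (diff == 0) contribute nothing under the strict inequality
# above, whereas the Mann-Whitney U convention credits ties with 0.5; this
# matters little here since the scores come from predict_proba and are
# effectively tie-free.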
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve that starts at 0 and ends at
# 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
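# drop_intermediate=True drops suboptimal thresholds whose (fpr, tpr)
# points lie on a straight segment between their neighbours, shortening the
# returned arrays without changing the shape of the plotted ROC curve.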
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
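# The p.size == t.size + 1 invariant holds because precision_recall_curve
# appends a final (precision=1, recall=0) point that has no corresponding
# threshold.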
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test precision-recall curve and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various cases.
# Basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied for rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a rank at least as good,
# i.e. a smaller or equal rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
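# Worked example of the tie-corrected ranking above for a single sample
# with y_score[i] = [0.5, 0.5, 0.2]:
# unique_rank = [0.2, 0.5], inv_rank = [1, 1, 0], so rank = [1, 1, 2];
# corr_rank = bincount([1, 1, 2], minlength=3).cumsum() = [0, 2, 3], giving
# corrected ranks [2, 2, 3]: the two tied top scores both get rank 2.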
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
kashif/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 8 | 44274 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
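# The scale/add/divide steps above are the incremental form of the running
# mean, avg_{i+1} = (i * avg_i + w_{i+1}) / (i + 1), so after the loop
# average_weights is the mean of all weight iterates.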
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clockwise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
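# The multiplicative contract checked above, written out as a sketch: each
# sample's effective weight is class_weight[y_i] * sample_weight[i].
def _effective_sample_weights(y, class_weight, sample_weight):
    return np.asarray(sample_weight) * np.array(
        [class_weight.get(label, 1.0) for label in y])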
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another balanced model; the "fit parameter override" this block once
# exercised (passing class_weight to fit) is no longer supported
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
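# A sketch of the 'balanced' heuristic exercised above: each class is
# weighted inversely to its frequency, n_samples / (n_classes * count).
def _balanced_class_weights_sketch(y):
    classes, counts = np.unique(y, return_counts=True)
    return dict(zip(classes, float(len(y)) / (len(classes) * counts)))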
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clockwise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
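# Reference definitions (a sketch, not the library's code) of the two robust
# regression losses exercised above: epsilon-insensitive ignores residuals
# below epsilon, while Huber is quadratic near zero and linear beyond
# epsilon, which is what blunts the influence of outliers.
def _epsilon_insensitive_loss(y_true, y_pred, epsilon):
    return np.maximum(np.abs(y_pred - y_true) - epsilon, 0.0)


def _huber_loss(y_true, y_pred, epsilon):
    residual = np.abs(y_pred - y_true)
    return np.where(residual <= epsilon,
                    0.5 * residual ** 2,
                    epsilon * residual - 0.5 * epsilon ** 2)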
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert_equal(clf.loss_functions['huber'][1], 0.1)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
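# The elastic net penalty interpolates between L1 and L2, which is why the
# l1_ratio extremes above recover the pure penalties. A sketch, up to the
# library's exact scaling conventions:
def _elasticnet_penalty(w, alpha, l1_ratio):
    return alpha * (l1_ratio * np.abs(w).sum()
                    + 0.5 * (1.0 - l1_ratio) * np.dot(w, w))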
def test_underflow_or_overflow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
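# Why the scaler choice above matters (sketch of the reasoning): MinMaxScaler
# only needs per-feature minima and maxima, which stay finite on the
# 1e300-scaled columns, whereas a naive variance pass squares the values and
# (1e300)**2 = 1e600 overflows IEEE doubles (max ~1.8e308) to inf.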
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
caisq/tensorflow | tensorflow/python/estimator/canned/baseline_test.py | 11 | 54918 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for baseline.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator.canned import baseline
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
BIAS_NAME = 'baseline/bias'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
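# Usage note (sketch): assert_close returns an assert op, so it only fires
# when executed in a session or attached as a control dependency, as the
# _mock_optimizer helpers below do, e.g.:
#   with tf_session.Session() as sess:
#     sess.run(assert_close(2.0, 2.0))  # passes; rdiff = 0 < rtol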
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
def _baseline_regressor_fn(*args, **kwargs):
return baseline.BaselineRegressor(*args, **kwargs)
def _baseline_classifier_fn(*args, **kwargs):
return baseline.BaselineClassifier(*args, **kwargs)
# Tests for Baseline Regressor.
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaselineRegressorEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is bias = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([13.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
baseline_regressor = _baseline_regressor_fn(
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = baseline_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is bias = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
label_dim = 2
with ops.Graph().as_default():
variables.Variable([46.0, 58.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = baseline_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is bias which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaselineRegressorPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# BaselineRegressor ignores the features; prediction = bias = .2
self.assertAllClose([[.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
with ops.Graph().as_default():
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
baseline_regressor = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = baseline_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = bias, shape=[batch_size, label_dimension]
self.assertAllClose([[0.2, 0.4, 0.6], [0.2, 0.4, 0.6]],
predicted_scores)
class BaselineRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame naturally supports 1-dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaselineRegressorTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEqual(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return distribute_lib.increment_var(global_step)
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
label_dimension,
expected_global_step,
expected_bias=None):
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([label_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create BaselineRegressor.
label = 5.
age = 17
baseline_regressor = _baseline_regressor_fn(model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(label_dimension=1, expected_global_step=num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
est = _baseline_regressor_fn(
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(label_dimension=1, expected_global_step=200)
def testFromScratch(self):
# Create BaselineRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=num_steps,
expected_bias=[0.])
def testFromCheckpoint(self):
# Create initial checkpoint.
bias = 7.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = 7.
# loss = (logits - label)^2 = (7 - 5)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=[bias])
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias
# logits[0] = 5.
# logits[1] = 5.
# loss = sum((logits - label)^2) = (5 - 5)^2 + (5 - 3)^2 = 4
mock_optimizer = self._mock_optimizer(expected_loss=4.)
baseline_regressor = _baseline_regressor_fn(
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
baseline_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
label_dimension=1,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
# Tests for Baseline Classifier.
class BaselineClassifierTrainingTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEqual(0, loss.shape.ndims)
if expected_loss is None:
return distribute_lib.increment_var(global_step)
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return distribute_lib.increment_var(global_step)
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = baseline.BaselineClassifier(
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For the binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits = 0 (the bias
# is initialized to zero) and label = 1, so
# loss = 1 * -log(sigmoid(logits)) = 0.69315
# For the multi-class classifier:
# loss = cross_entropy(logits, label) where the logits are all 0s (the
# bias is initialized to zero) and label = 1, so
# loss = 1 * -log(1.0 / n_classes)
# Since the logits are all equal in this test, the formula
# 1 * -log(1.0 / n_classes) covers both the binary and multi-class cases.
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
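# Worked values for the expected loss above (sketch): -log(1/2) = 0.69315,
# which matches sigmoid cross-entropy at logit 0 for n_classes=2, and
# -log(1/4) = 1.38629 for n_classes=4.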
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = 1
# so, loss = 1 * -log ( softmax(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
bias = [-1.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = bias = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
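# Worked check of expected_loss above (sketch): -log(sigmoid(-1)) = 1.31326
# and -log(sigmoid(+1)) = 0.31326, so the float-label loss is
# 0.8 * 1.31326 + 0.2 * 0.31326 = 1.05061 + 0.06265 = 1.11326 ~= 1.1132617.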
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
# The bias has shape (1,) in the binary case and (n_classes,) otherwise;
# it is initialized to -1 in every component so the expected loss below is
# easy to compute by hand.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = bias
# logits[0] = -1.
# logits[1] = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(-1) ) = 0.3132
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = bias and label = [1, 0]
# so, loss = 1 * -log ( softmax(logits)[label] )
if n_classes == 2:
expected_loss = (1.3133 + 0.3132)
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = baseline.BaselineClassifier(
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaselineClassifierEvaluationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
# Binary classes: loss = -log(sigmoid(-1)) = 1.3133
# Prediction = sigmoid(-1) = 0.2689
expected_metrics = {
metric_keys.MetricKeys.LOSS: 1.3133,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 1.3133,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( softmax(logits)[label] )
logits = bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.) labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# Prediction = sigmoid(-1) = 0.2689
expected_loss = 1.3133 + 0.3132
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.2689,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 0.75,
}
else:
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.5,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# The bias has shape (1,) in the binary case and (n_classes,) otherwise;
# it is initialized to -1 in every component so the expected losses below
# are easy to compute by hand.
bias = [-1.0] if n_classes == 2 else [-1.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., -1.), labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(-1)) = 0.3132
# weights = [1., 2.]
expected_loss = 1.3133 * 1. + 0.3132 * 2.
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, -1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
metric_keys.MetricKeys.PRECISION: 0.,
metric_keys.MetricKeys.RECALL: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.5,
metric_keys.MetricKeys.AUC_PR: 2. / (1. + 2.),
}
else:
# Multi classes: unweighted_loss = 1 * -log ( softmax(logits)[label] )
# Expand logits since batch_size=2
logits = bias * np.ones(shape=(2, 1))
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 2. / (1. + 2.),
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaselineClassifierPredictTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = _baseline_classifier_fn(
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = bias[0]
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
expected_predictions = {
'class_ids': [1],
'classes': [label_output_fn(1)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.array(bias)
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredictions(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredictions(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaselineClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = _baseline_classifier_fn(
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
# Tests for Baseline logit_fn.
class BaselineLogitFnTest(test.TestCase):
def test_basic_logit_correctness(self):
"""baseline_logit_fn simply returns the bias variable."""
with ops.Graph().as_default():
logit_fn = baseline._baseline_logit_fn_builder(num_outputs=2)
logits = logit_fn(features={'age': [[23.], [31.]]})
with variable_scope.variable_scope('baseline', reuse=True):
bias_var = variable_scope.get_variable('bias')
with tf_session.Session() as sess:
sess.run([variables.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
if __name__ == '__main__':
test.main()
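# --- Editor's illustrative sketch (hypothetical helper `_demo_expected_loss_constants`,
# not part of the original test file): re-derives the hand-computed constants
# used in the evaluation tests above, e.g. sigmoid(-1) ~= 0.2689,
# -log(sigmoid(-1)) ~= 1.3133 and -log(1 - sigmoid(-1)) ~= 0.3132.
def _demo_expected_loss_constants():
    logit = -1.0
    p = 1.0 / (1.0 + math.exp(-logit))  # sigmoid(-1) ~= 0.2689
    loss_pos = -math.log(p)             # label 1 -> ~1.3133
    loss_neg = -math.log(1.0 - p)       # label 0 -> ~0.3132
    assert abs(p - 0.2689) < 1e-4
    assert abs(loss_pos - 1.3133) < 1e-4
    assert abs(loss_neg - 0.3132) < 1e-4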
| apache-2.0 |
michigraber/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
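# --- Editor's illustrative sketch (hypothetical helper
# `_demo_kernel_vs_primal_ridge`, not part of the original module): the two
# matvec branches above solve the same ridge problem through either the kernel
# (n_samples x n_samples) or the primal (n_features x n_features) normal
# equations, via the identity
# X^T (X X^T + alpha*Id)^{-1} y == (X^T X + alpha*Id)^{-1} X^T y.
def _demo_kernel_vs_primal_ridge():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 8)  # n_features > n_samples triggers the kernel branch
    y = rng.randn(5)
    alpha = 2.0
    n_samples, n_features = X.shape
    w_kernel = X.T.dot(
        np.linalg.solve(X.dot(X.T) + alpha * np.eye(n_samples), y))
    w_primal = np.linalg.solve(
        X.T.dot(X) + alpha * np.eye(n_features), X.T.dot(y))
    assert np.allclose(w_kernel, w_primal)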
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
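# --- Editor's illustrative sketch (hypothetical helper
# `_demo_lsqr_damp_equals_sqrt_alpha`, not part of the original module):
# scipy's lsqr minimizes ||X w - y||^2 + damp^2 * ||w||^2, which is why
# damp=sqrt(alpha) above reproduces the ridge solution.
def _demo_lsqr_damp_equals_sqrt_alpha():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20)
    alpha = 0.5
    w_lsqr = sp_linalg.lsqr(X, y, damp=np.sqrt(alpha))[0]
    w_closed_form = np.linalg.solve(
        X.T.dot(X) + alpha * np.eye(3), X.T.dot(y))
    assert np.allclose(w_lsqr, w_closed_form, atol=1e-5)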
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty; we can solve all multi-target problems at once.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory, so undo the in-place
# modification in case K was user-provided.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
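# --- Editor's illustrative sketch (hypothetical helper
# `_demo_svd_filter_matches_direct_solve`, not part of the original module):
# the spectral filter d = s / (s^2 + alpha) applied above reproduces the
# normal-equations solution, i.e. w = V diag(d) U^T y.
def _demo_svd_filter_matches_direct_solve():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    y = rng.randn(10, 1)
    alpha = np.array([1.5])  # one penalty, one target
    coef_svd = _solve_svd(X, y, alpha)
    coef_direct = np.linalg.solve(
        X.T.dot(X) + alpha[0] * np.eye(4), X.T.dot(y)).T
    assert np.allclose(coef_svd, coef_direct)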
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
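# --- Editor's illustrative sketch (hypothetical helper
# `_demo_sqrt_weight_rescaling`, not part of the original module): multiplying
# the rows of X and y by sqrt(sample_weight), as _rescale_data does, turns
# ordinary ridge into weighted ridge, since Xr^T Xr = X^T diag(w) X and
# Xr^T yr = X^T diag(w) y.
def _demo_sqrt_weight_rescaling():
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = rng.randn(8)
    w = rng.rand(8) + 0.1
    alpha = 1.0
    sw = np.sqrt(w)
    Xr, yr = sw[:, np.newaxis] * X, sw * y  # dense form of the rescaling
    coef_rescaled = np.linalg.solve(
        Xr.T.dot(Xr) + alpha * np.eye(3), Xr.T.dot(yr))
    coef_weighted = np.linalg.solve(
        X.T.dot(w[:, np.newaxis] * X) + alpha * np.eye(3), X.T.dot(w * y))
    assert np.allclose(coef_rescaled, coef_weighted)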
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array or if sample weights are given,
# sparse_cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
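# --- Editor's illustrative usage sketch (hypothetical helper
# `_demo_ridge_regression_usage`, not part of the original module):
# ridge_regression is a plain function, so it can be called directly without
# an estimator object; as the Notes say, it does not fit an intercept.
def _demo_ridge_regression_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 3)
    y = rng.randn(10)
    coef = ridge_regression(X, y, alpha=1.0, solver='cholesky')
    assert coef.shape == (3,)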
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular Value Decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg, while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
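# --- Editor's illustrative sketch (hypothetical helper
# `_demo_one_vs_all_via_multioutput_ridge`, not part of the original module):
# the one-versus-all scheme described in the Notes above, written out in plain
# NumPy: binarize y into a {-1, +1} indicator matrix, fit one ridge weight
# vector per class, and classify by argmax of the decision values
# (alpha=1 and no intercept, for brevity).
def _demo_one_vs_all_via_multioutput_ridge():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = rng.randint(0, 3, size=30)
    Y = -np.ones((30, 3))
    Y[np.arange(30), y] = 1.0  # what LabelBinarizer(pos_label=1, neg_label=-1) produces
    coef = np.linalg.solve(X.T.dot(X) + np.eye(4), X.T.dot(Y)).T
    predicted = X.dot(coef.T).argmax(axis=1)
    assert predicted.shape == (30,)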
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
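# --- Editor's illustrative sketch (hypothetical helper
# `_demo_loo_errors_identity`, not part of the original module): the identity
# looe = c / diag(G) from the _RidgeGCV notes, checked by brute force against
# refitting with each sample left out (no intercept, single alpha).
def _demo_loo_errors_identity():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 2)
    y = rng.randn(6)
    alpha = 1.0
    n = X.shape[0]
    G = np.linalg.inv(X.dot(X.T) + alpha * np.eye(n))
    c = G.dot(y)
    looe_formula = c / np.diag(G)
    looe_brute = np.empty(n)
    for i in range(n):
        mask = np.arange(n) != i
        Xi, yi = X[mask], y[mask]
        ci = np.linalg.solve(Xi.dot(Xi.T) + alpha * np.eye(n - 1), yi)
        looe_brute[i] = y[i] - X[i].dot(Xi.T).dot(ci)
    assert np.allclose(looe_formula, looe_brute)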
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features and X is not sparse,
otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
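# --- Editor's illustrative sketch (hypothetical class `_FirstHalfSecondHalf`,
# not part of the original module): the base class above only requires
# `_iter_test_indices`; mask building and the train = ~test complement are
# inherited.
class _FirstHalfSecondHalf(_PartitionIterator):
    """Two folds: test on the first half, then on the second half."""
    def _iter_test_indices(self):
        half = self.n // 2
        yield np.arange(half)
        yield np.arange(half, self.n)
# Iterating _FirstHalfSecondHalf(4) yields (train=[2 3], test=[0 1]) and then
# (train=[0 1], test=[2 3]).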
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
Each fold is then used as a validation set once while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
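# --- Editor's illustrative sketch (hypothetical helper
# `_demo_kfold_fold_sizes`, not part of the original module): the fold-size
# rule from the KFold Notes, checked directly: with n=10 and n_folds=4, the
# first n % n_folds folds get one extra sample.
def _demo_kfold_fold_sizes():
    kf = KFold(10, n_folds=4)
    sizes = [len(test) for _, test in kf]
    assert sizes == [3, 3, 2, 2]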
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePLabelOut and LeaveOneLabelOut is that
    the former builds the test sets with all the samples assigned to
    ``p`` different values of the labels, while the latter uses all the
    samples assigned to a single label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
    p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
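# Illustrative sketch (not part of the original source; the 10-sample dataset
# size is an assumption for illustration): how _validate_shuffle_split
# resolves float/int sizes into absolute sample counts.
def _example_validate_shuffle_split():
    # a float test_size is a proportion: ceil(0.2 * 10) == 2 test samples
    assert _validate_shuffle_split(10, test_size=0.2, train_size=None) == (8, 2)
    # an int test_size is an absolute count; train defaults to the rest
    assert _validate_shuffle_split(10, test_size=3, train_size=None) == (7, 3)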
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (as n_train and n_test are not
            # necessarily exact multiples of the per-class counts), we may
            # end up with fewer samples in train and test than asked for.
            if len(train) < self.n_train or len(test) < self.n_test:
                # Complete the sets by randomly assigning the missing
                # indices among those not yet drawn
                missing_idx = np.where(bincount(train + test,
                                                minlength=len(self.y)) == 0)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
        self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
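# Illustrative sketch (not part of the original source): fit parameters that
# are array-like with one entry per sample (e.g. sample_weight) get sliced to
# the given indices; anything else passes through untouched.
def _example_index_param_value():
    X = np.zeros((4, 2))
    sample_weight = np.array([1., 2., 3., 4.])
    train = np.array([0, 2])
    assert np.array_equal(_index_param_value(X, sample_weight, train),
                          [1., 3.])
    assert _index_param_value(X, 5, train) == 5  # scalar: passed through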
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at
        least two dimensions.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
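# Hedged usage sketch (not part of the original source; the dataset and
# estimator are illustrative choices): cross_val_predict returns exactly one
# out-of-fold prediction per input sample.
def _example_cross_val_predict():
    from sklearn import datasets, linear_model
    diabetes = datasets.load_diabetes()
    lasso = linear_model.Lasso()
    preds = cross_val_predict(lasso, diabetes.data, diabetes.target, cv=3)
    assert preds.shape == diabetes.target.shape
    return preds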
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
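# Illustrative sketch (not part of the original source): a partition is any
# reordering of np.arange(n); repeats or gaps fail the check.
def _example_check_is_partition():
    assert _check_is_partition(np.array([2, 0, 1]), 3)      # a reordering
    assert not _check_is_partition(np.array([0, 0, 2]), 3)  # 1 never hit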
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at
        least two dimensions.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
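# Hedged usage sketch (not part of the original source; the dataset and
# estimator are illustrative choices): score a linear SVM with 5-fold
# stratified cross-validation.
def _example_cross_val_score():
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    clf = svm.SVC(kernel='linear', C=1)
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    return scores.mean(), scores.std()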
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
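# Illustrative sketch (not part of the original source; SVC with a
# precomputed kernel is an illustrative choice of pairwise estimator): for
# pairwise estimators _safe_split slices the square kernel matrix to
# K[train, train] for fitting and K[test, train] for prediction.
def _example_safe_split_precomputed():
    from sklearn.svm import SVC
    X = np.random.RandomState(0).rand(5, 3)
    K = np.dot(X, X.T)  # square (5, 5) kernel matrix
    svc = SVC(kernel='precomputed')
    train, test = np.array([0, 1, 2]), np.array([3, 4])
    K_train, _ = _safe_split(svc, K, None, train)
    K_test, _ = _safe_split(svc, K, None, test, train)
    assert K_train.shape == (3, 3) and K_test.shape == (2, 3)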
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
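# Illustrative sketch (not part of the original source): with labels given,
# _shuffle only permutes y within groups sharing the same label.
def _example_shuffle_within_labels():
    rng = check_random_state(0)
    y = np.array([1, 2, 3, 4])
    labels = np.array([0, 0, 1, 1])
    y_perm = _shuffle(y, labels, rng)
    # values only move within their own label group
    assert set(y_perm[:2]) == {1, 2} and set(y_perm[2:]) == {3, 4}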
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
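# Illustrative sketch (not part of the original source): an integer cv
# becomes StratifiedKFold for classification targets and plain KFold
# otherwise.
def _example_check_cv():
    X = np.zeros((6, 2))
    y = np.array([0, 0, 0, 1, 1, 1])
    assert isinstance(check_cv(3, X, y, classifier=True), StratifiedKFold)
    assert isinstance(check_cv(3, X, y, classifier=False), KFold)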
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a pb with nosetests
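# Hedged usage sketch (not part of the original source; the dataset,
# estimator and permutation count are illustrative choices): a small p-value
# suggests the iris score is unlikely under a chance labelling.
def _example_permutation_significance():
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    score, permutation_scores, pvalue = permutation_test_score(
        svm.SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=30, random_state=0)
    return score, pvalue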
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
    ``next(iter(ShuffleSplit(n_samples)))`` and their application to the
    input data into a single call for splitting (and optionally
    subsampling) the data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
| bsd-3-clause |
rhattersley/cartopy | lib/cartopy/crs.py | 1 | 90404 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
The crs module defines Coordinate Reference Systems and the transformations
between them.
"""
from __future__ import (absolute_import, division, print_function)
from abc import ABCMeta, abstractproperty
import math
import warnings
import numpy as np
import shapely.geometry as sgeom
from shapely.prepared import prep
import six
from cartopy._crs import CRS, Geodetic, Globe, PROJ4_VERSION
from cartopy._crs import Geocentric # noqa: F401 (flake8 = unused import)
import cartopy.trace
__document_these__ = ['CRS', 'Geocentric', 'Geodetic', 'Globe']
WGS84_SEMIMAJOR_AXIS = 6378137.0
WGS84_SEMIMINOR_AXIS = 6356752.3142
class RotatedGeodetic(CRS):
"""
Define a rotated latitude/longitude coordinate system with spherical
topology and geographical distance.
Coordinates are measured in degrees.
    The class uses proj to perform an ob_tran operation, using the
    pole_longitude to set a lon_0, then performing two rotations based on
    pole_latitude and central_rotated_longitude.
    This is equivalent to setting the new pole to a location defined by
    the pole_latitude and pole_longitude values in the GeogCRS defined by
    globe, then rotating this new CRS about its pole using the
    central_rotated_longitude value.
"""
def __init__(self, pole_longitude, pole_latitude,
central_rotated_longitude=0.0, globe=None):
"""
Parameters
----------
pole_longitude
Pole longitude position, in unrotated degrees.
pole_latitude
Pole latitude position, in unrotated degrees.
central_rotated_longitude: optional
Longitude rotation about the new pole, in degrees. Defaults to 0.
globe: optional
A :class:`cartopy.crs.Globe`. Defaults to a "WGS84" datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
globe = globe or Globe(datum='WGS84')
super(RotatedGeodetic, self).__init__(proj4_params, globe=globe)
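# Hedged usage sketch (not part of the original module; the pole position is
# the common Euro-CORDEX rotated pole, chosen purely for illustration):
# transform a true lon/lat point into rotated coordinates.
def _example_rotated_geodetic():
    rotated = RotatedGeodetic(pole_longitude=177.5, pole_latitude=37.5)
    # transform_point(x, y, src_crs) is inherited from CRS
    rlon, rlat = rotated.transform_point(10.0, 50.0, Geodetic())
    return rlon, rlat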
class Projection(six.with_metaclass(ABCMeta, CRS)):
"""
Define a projected coordinate system with flat topology and Euclidean
distance.
"""
_method_map = {
'Point': '_project_point',
'LineString': '_project_line_string',
'LinearRing': '_project_linear_ring',
'Polygon': '_project_polygon',
'MultiPoint': '_project_multipoint',
'MultiLineString': '_project_multiline',
'MultiPolygon': '_project_multipolygon',
}
@abstractproperty
def boundary(self):
pass
@abstractproperty
def threshold(self):
pass
@abstractproperty
def x_limits(self):
pass
@abstractproperty
def y_limits(self):
pass
@property
def cw_boundary(self):
try:
boundary = self._cw_boundary
except AttributeError:
boundary = sgeom.LinearRing(self.boundary)
self._cw_boundary = boundary
return boundary
@property
def ccw_boundary(self):
try:
boundary = self._ccw_boundary
except AttributeError:
boundary = sgeom.LinearRing(self.boundary.coords[::-1])
self._ccw_boundary = boundary
return boundary
@property
def domain(self):
try:
domain = self._domain
except AttributeError:
domain = self._domain = sgeom.Polygon(self.boundary)
return domain
def _determine_longitude_bounds(self, central_longitude):
# In new proj, using exact limits will wrap-around, so subtract a
# small epsilon:
epsilon = 1e-10
minlon = -180 + central_longitude
maxlon = 180 + central_longitude
if central_longitude > 0:
maxlon -= epsilon
elif central_longitude < 0:
minlon += epsilon
return minlon, maxlon
def _repr_html_(self):
import cgi
try:
# As matplotlib is not a core cartopy dependency, don't error
# if it's not available.
import matplotlib.pyplot as plt
except ImportError:
# We can't return an SVG of the CRS, so let Jupyter fall back to
# a default repr by returning None.
return None
# Produce a visual repr of the Projection instance.
fig, ax = plt.subplots(figsize=(5, 3),
subplot_kw={'projection': self})
ax.set_global()
ax.coastlines('auto')
ax.gridlines()
buf = six.StringIO()
fig.savefig(buf, format='svg', bbox_inches='tight')
plt.close(fig)
# "Rewind" the buffer to the start and return it as an svg string.
buf.seek(0)
svg = buf.read()
return '{}<pre>{}</pre>'.format(svg, cgi.escape(repr(self)))
def _as_mpl_axes(self):
import cartopy.mpl.geoaxes as geoaxes
return geoaxes.GeoAxes, {'map_projection': self}
def project_geometry(self, geometry, src_crs=None):
"""
Project the given geometry into this projection.
Parameters
----------
geometry
The geometry to (re-)project.
src_crs: optional
The source CRS. Defaults to None.
If src_crs is None, the source CRS is assumed to be a geodetic
version of the target CRS.
Returns
-------
geometry
The projected result (a shapely geometry).
"""
if src_crs is None:
src_crs = self.as_geodetic()
elif not isinstance(src_crs, CRS):
raise TypeError('Source CRS must be an instance of CRS'
' or one of its subclasses, or None.')
geom_type = geometry.geom_type
method_name = self._method_map.get(geom_type)
if not method_name:
raise ValueError('Unsupported geometry '
'type {!r}'.format(geom_type))
return getattr(self, method_name)(geometry, src_crs)
def _project_point(self, point, src_crs):
return sgeom.Point(*self.transform_point(point.x, point.y, src_crs))
def _project_line_string(self, geometry, src_crs):
return cartopy.trace.project_linear(geometry, src_crs, self)
def _project_linear_ring(self, linear_ring, src_crs):
"""
Project the given LinearRing from the src_crs into this CRS and
returns a list of LinearRings and a single MultiLineString.
"""
debug = False
# 1) Resolve the initial lines into projected segments
# 1abc
# def23ghi
# jkl41
multi_line_string = cartopy.trace.project_linear(linear_ring,
src_crs, self)
# Threshold for whether a point is close enough to be the same
# point as another.
threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
# 2) Simplify the segments where appropriate.
if len(multi_line_string) > 1:
# Stitch together segments which are close to continuous.
# This is important when:
# 1) The first source point projects into the map and the
# ring has been cut by the boundary.
# Continuing the example from above this gives:
# def23ghi
# jkl41abc
# 2) The cut ends of segments are too close to reliably
# place into an order along the boundary.
line_strings = list(multi_line_string)
any_modified = False
i = 0
if debug:
first_coord = np.array([ls.coords[0] for ls in line_strings])
last_coord = np.array([ls.coords[-1] for ls in line_strings])
print('Distance matrix:')
np.set_printoptions(precision=2)
x = first_coord[:, np.newaxis, :]
y = last_coord[np.newaxis, :, :]
print(np.abs(x - y).max(axis=-1))
while i < len(line_strings):
modified = False
j = 0
while j < len(line_strings):
if i != j and np.allclose(line_strings[i].coords[0],
line_strings[j].coords[-1],
atol=threshold):
if debug:
print('Joining together {} and {}.'.format(i, j))
last_coords = list(line_strings[j].coords)
first_coords = list(line_strings[i].coords)[1:]
combo = sgeom.LineString(last_coords + first_coords)
if j < i:
i, j = j, i
del line_strings[j], line_strings[i]
line_strings.append(combo)
modified = True
any_modified = True
break
else:
j += 1
if not modified:
i += 1
if any_modified:
multi_line_string = sgeom.MultiLineString(line_strings)
# 3) Check for rings that have been created by the projection stage.
rings = []
line_strings = []
for line in multi_line_string:
if len(line.coords) > 3 and np.allclose(line.coords[0],
line.coords[-1],
atol=threshold):
result_geometry = sgeom.LinearRing(line.coords[:-1])
rings.append(result_geometry)
else:
line_strings.append(line)
# If we found any rings, then we should re-create the multi-line str.
if rings:
multi_line_string = sgeom.MultiLineString(line_strings)
return rings, multi_line_string
def _project_multipoint(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
geoms.append(self._project_point(geom, src_crs))
if geoms:
return sgeom.MultiPoint(geoms)
else:
return sgeom.MultiPoint()
def _project_multiline(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_line_string(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
return sgeom.MultiLineString(geoms)
else:
return []
def _project_multipolygon(self, geometry, src_crs):
geoms = []
for geom in geometry.geoms:
r = self._project_polygon(geom, src_crs)
if r:
geoms.extend(r.geoms)
if geoms:
result = sgeom.MultiPolygon(geoms)
else:
result = sgeom.MultiPolygon()
return result
def _project_polygon(self, polygon, src_crs):
"""
Return the projected polygon(s) derived from the given polygon.
"""
# Determine orientation of polygon.
# TODO: Consider checking the internal rings have the opposite
# orientation to the external rings?
if src_crs.is_geodetic():
is_ccw = True
else:
is_ccw = polygon.exterior.is_ccw
# Project the polygon exterior/interior rings.
# Each source ring will result in either a ring, or one or more
# lines.
rings = []
multi_lines = []
for src_ring in [polygon.exterior] + list(polygon.interiors):
p_rings, p_mline = self._project_linear_ring(src_ring, src_crs)
if p_rings:
rings.extend(p_rings)
if len(p_mline) > 0:
multi_lines.append(p_mline)
# Convert any lines to rings by attaching them to the boundary.
if multi_lines:
rings.extend(self._attach_lines_to_boundary(multi_lines, is_ccw))
# Resolve all the inside vs. outside rings, and convert to the
# final MultiPolygon.
return self._rings_to_multi_polygon(rings, is_ccw)
def _attach_lines_to_boundary(self, multi_line_strings, is_ccw):
"""
Return a list of LinearRings by attaching the ends of the given lines
to the boundary, paying attention to the traversal directions of the
lines and boundary.
"""
debug = False
debug_plot_edges = False
# Accumulate all the boundary and segment end points, along with
# their distance along the boundary.
edge_things = []
# Get the boundary as a LineString of the correct orientation
# so we can compute distances along it.
if is_ccw:
boundary = self.ccw_boundary
else:
boundary = self.cw_boundary
def boundary_distance(xy):
return boundary.project(sgeom.Point(*xy))
# Squash all the LineStrings into a single list.
line_strings = []
for multi_line_string in multi_line_strings:
line_strings.extend(multi_line_string)
# Record the positions of all the segment ends
for i, line_string in enumerate(line_strings):
first_dist = boundary_distance(line_string.coords[0])
thing = _BoundaryPoint(first_dist, False,
(i, 'first', line_string.coords[0]))
edge_things.append(thing)
last_dist = boundary_distance(line_string.coords[-1])
thing = _BoundaryPoint(last_dist, False,
(i, 'last', line_string.coords[-1]))
edge_things.append(thing)
# Record the positions of all the boundary vertices
for xy in boundary.coords[:-1]:
point = sgeom.Point(*xy)
dist = boundary.project(point)
thing = _BoundaryPoint(dist, True, point)
edge_things.append(thing)
if debug_plot_edges:
import matplotlib.pyplot as plt
current_fig = plt.gcf()
fig = plt.figure()
# Reset the current figure so we don't upset anything.
plt.figure(current_fig.number)
ax = fig.add_subplot(1, 1, 1)
# Order everything as if walking around the boundary.
# NB. We make line end-points take precedence over boundary points
# to ensure that end-points are still found and followed when they
# coincide.
edge_things.sort(key=lambda thing: (thing.distance, thing.kind))
remaining_ls = dict(enumerate(line_strings))
prev_thing = None
for edge_thing in edge_things[:]:
if (prev_thing is not None and
not edge_thing.kind and
not prev_thing.kind and
edge_thing.data[0] == prev_thing.data[0]):
j = edge_thing.data[0]
                # Insert an edge boundary point in between this geometry.
mid_dist = (edge_thing.distance + prev_thing.distance) * 0.5
mid_point = boundary.interpolate(mid_dist)
new_thing = _BoundaryPoint(mid_dist, True, mid_point)
if debug:
print('Artificially insert boundary: {}'.format(new_thing))
ind = edge_things.index(edge_thing)
edge_things.insert(ind, new_thing)
prev_thing = None
else:
prev_thing = edge_thing
if debug:
print()
print('Edge things')
for thing in edge_things:
print(' ', thing)
if debug_plot_edges:
for thing in edge_things:
if isinstance(thing.data, sgeom.Point):
ax.plot(*thing.data.xy, marker='o')
else:
ax.plot(*thing.data[2], marker='o')
ls = line_strings[thing.data[0]]
coords = np.array(ls.coords)
ax.plot(coords[:, 0], coords[:, 1])
ax.text(coords[0, 0], coords[0, 1], thing.data[0])
ax.text(coords[-1, 0], coords[-1, 1],
'{}.'.format(thing.data[0]))
def filter_last(t):
return t.kind or t.data[1] == 'first'
edge_things = list(filter(filter_last, edge_things))
processed_ls = []
while remaining_ls:
# Rename line_string to current_ls
i, current_ls = remaining_ls.popitem()
if debug:
import sys
sys.stdout.write('+')
sys.stdout.flush()
print()
print('Processing: %s, %s' % (i, current_ls))
added_linestring = set()
while True:
# Find out how far around this linestring's last
# point is on the boundary. We will use this to find
# the next point on the boundary.
d_last = boundary_distance(current_ls.coords[-1])
if debug:
print(' d_last: {!r}'.format(d_last))
next_thing = _find_first_ge(edge_things, d_last)
# Remove this boundary point from the edge.
edge_things.remove(next_thing)
if debug:
print(' next_thing:', next_thing)
if next_thing.kind:
# We've just got a boundary point, add it, and keep going.
if debug:
print(' adding boundary point')
boundary_point = next_thing.data
combined_coords = (list(current_ls.coords) +
[(boundary_point.x, boundary_point.y)])
current_ls = sgeom.LineString(combined_coords)
elif next_thing.data[0] == i:
# We've gone all the way around and are now back at the
# first boundary thing.
if debug:
print(' close loop')
processed_ls.append(current_ls)
if debug_plot_edges:
coords = np.array(current_ls.coords)
ax.plot(coords[:, 0], coords[:, 1], color='black',
linestyle='--')
break
else:
if debug:
print(' adding line')
j = next_thing.data[0]
line_to_append = line_strings[j]
if j in remaining_ls:
remaining_ls.pop(j)
coords_to_append = list(line_to_append.coords)
# Build up the linestring.
current_ls = sgeom.LineString((list(current_ls.coords) +
coords_to_append))
# Catch getting stuck in an infinite loop by checking that
# linestring only added once.
if j not in added_linestring:
added_linestring.add(j)
else:
if debug_plot_edges:
plt.show()
raise RuntimeError('Unidentified problem with '
'geometry, linestring being '
're-added. Please raise an issue.')
# filter out any non-valid linear rings
linear_rings = [
sgeom.LinearRing(linear_ring)
for linear_ring in processed_ls
if len(linear_ring.coords) > 2 and linear_ring.is_valid]
if debug:
print(' DONE')
return linear_rings
def _rings_to_multi_polygon(self, rings, is_ccw):
exterior_rings = []
interior_rings = []
for ring in rings:
if ring.is_ccw != is_ccw:
interior_rings.append(ring)
else:
exterior_rings.append(ring)
polygon_bits = []
# Turn all the exterior rings into polygon definitions,
# "slurping up" any interior rings they contain.
for exterior_ring in exterior_rings:
polygon = sgeom.Polygon(exterior_ring)
prep_polygon = prep(polygon)
holes = []
for interior_ring in interior_rings[:]:
if prep_polygon.contains(interior_ring):
holes.append(interior_ring)
interior_rings.remove(interior_ring)
elif polygon.crosses(interior_ring):
# Likely that we have an invalid geometry such as
# that from #509 or #537.
holes.append(interior_ring)
interior_rings.remove(interior_ring)
polygon_bits.append((exterior_ring.coords,
[ring.coords for ring in holes]))
# Any left over "interior" rings need "inverting" with respect
# to the boundary.
if interior_rings:
boundary_poly = self.domain
x3, y3, x4, y4 = boundary_poly.bounds
bx = (x4 - x3) * 0.1
by = (y4 - y3) * 0.1
x3 -= bx
y3 -= by
x4 += bx
y4 += by
for ring in interior_rings:
# Use shapely buffer in an attempt to fix invalid geometries
polygon = sgeom.Polygon(ring).buffer(0)
if not polygon.is_empty and polygon.is_valid:
x1, y1, x2, y2 = polygon.bounds
bx = (x2 - x1) * 0.1
by = (y2 - y1) * 0.1
x1 -= bx
y1 -= by
x2 += bx
y2 += by
box = sgeom.box(min(x1, x3), min(y1, y3),
max(x2, x4), max(y2, y4))
# Invert the polygon
polygon = box.difference(polygon)
# Intersect the inverted polygon with the boundary
polygon = boundary_poly.intersection(polygon)
if not polygon.is_empty:
polygon_bits.append(polygon)
if polygon_bits:
multi_poly = sgeom.MultiPolygon(polygon_bits)
else:
multi_poly = sgeom.MultiPolygon()
return multi_poly
def quick_vertices_transform(self, vertices, src_crs):
"""
Where possible, return a vertices array transformed to this CRS from
the given vertices array of shape ``(n, 2)`` and the source CRS.
Note
----
This method may return None to indicate that the vertices cannot
be transformed quickly, and a more complex geometry transformation
is required (see :meth:`cartopy.crs.Projection.project_geometry`).
"""
return_value = None
if self == src_crs:
x = vertices[:, 0]
y = vertices[:, 1]
# Extend the limits a tiny amount to allow for precision mistakes
epsilon = 1.e-10
x_limits = (self.x_limits[0] - epsilon, self.x_limits[1] + epsilon)
y_limits = (self.y_limits[0] - epsilon, self.y_limits[1] + epsilon)
if (x.min() >= x_limits[0] and x.max() <= x_limits[1] and
y.min() >= y_limits[0] and y.max() <= y_limits[1]):
return_value = vertices
return return_value
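# Illustrative sketch (not part of the original module; PlateCarree is
# defined further down this file, so the body resolves it at call time):
# when the source and target CRS match and all vertices are in bounds, the
# fast path hands back the input array unchanged; otherwise it returns None
# and callers must fall back to project_geometry.
def _example_quick_vertices_transform():
    pc = PlateCarree()
    verts = np.array([[0.0, 0.0], [10.0, 20.0]])
    assert pc.quick_vertices_transform(verts, pc) is verts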
class _RectangularProjection(six.with_metaclass(ABCMeta, Projection)):
"""
The abstract superclass of projections with a rectangular domain which
is symmetric about the origin.
"""
def __init__(self, proj4_params, half_width, half_height, globe=None):
self._half_width = half_width
self._half_height = half_height
super(_RectangularProjection, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
# XXX Should this be a LinearRing?
w, h = self._half_width, self._half_height
return sgeom.LineString([(-w, -h), (-w, h), (w, h), (w, -h), (-w, -h)])
@property
def x_limits(self):
return (-self._half_width, self._half_width)
@property
def y_limits(self):
return (-self._half_height, self._half_height)
class _CylindricalProjection(six.with_metaclass(ABCMeta,
_RectangularProjection)):
"""
The abstract class which denotes cylindrical projections where we
want to allow x values to wrap around.
"""
def _ellipse_boundary(semimajor=2, semiminor=1, easting=0, northing=0, n=201):
"""
Define a projection boundary using an ellipse.
This type of boundary is used by several projections.
"""
t = np.linspace(0, -2 * np.pi, n) # Clockwise boundary.
coords = np.vstack([semimajor * np.cos(t), semiminor * np.sin(t)])
coords += ([easting], [northing])
return coords
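# Illustrative sketch (not part of the original module): the boundary is a
# closed, clockwise (2, n) coordinate array.
def _example_ellipse_boundary():
    coords = _ellipse_boundary(semimajor=2, semiminor=1, n=5)
    assert coords.shape == (2, 5)
    assert np.allclose(coords[:, 0], coords[:, -1])  # closed loop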
class PlateCarree(_CylindricalProjection):
def __init__(self, central_longitude=0.0, globe=None):
proj4_params = [('proj', 'eqc'), ('lon_0', central_longitude)]
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1))
a_rad = math.radians(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
x_max = a_rad * 180
y_max = a_rad * 90
# Set the threshold around 0.5 if the x max is 180.
self._threshold = x_max / 360.
super(PlateCarree, self).__init__(proj4_params, x_max, y_max,
globe=globe)
@property
def threshold(self):
return self._threshold
def _bbox_and_offset(self, other_plate_carree):
"""
Return a pair of (xmin, xmax) pairs and an offset which can be used
for identification of whether data in ``other_plate_carree`` needs
to be transformed to wrap appropriately.
>>> import cartopy.crs as ccrs
>>> src = ccrs.PlateCarree(central_longitude=10)
>>> bboxes, offset = ccrs.PlateCarree()._bbox_and_offset(src)
>>> print(bboxes)
[[-180.0, -170.0], [-170.0, 180.0]]
>>> print(offset)
10.0
The returned values are longitudes in ``other_plate_carree``'s
coordinate system.
Warning
-------
The two CRSs must be identical in every way, other than their
central longitudes. No checking of this is done.
"""
self_lon_0 = self.proj4_params['lon_0']
other_lon_0 = other_plate_carree.proj4_params['lon_0']
lon_0_offset = other_lon_0 - self_lon_0
lon_lower_bound_0 = self.x_limits[0]
lon_lower_bound_1 = (other_plate_carree.x_limits[0] + lon_0_offset)
if lon_lower_bound_1 < self.x_limits[0]:
lon_lower_bound_1 += np.diff(self.x_limits)[0]
lon_lower_bound_0, lon_lower_bound_1 = sorted(
[lon_lower_bound_0, lon_lower_bound_1])
bbox = [[lon_lower_bound_0, lon_lower_bound_1],
[lon_lower_bound_1, lon_lower_bound_0]]
bbox[1][1] += np.diff(self.x_limits)[0]
return bbox, lon_0_offset
def quick_vertices_transform(self, vertices, src_crs):
return_value = super(PlateCarree,
self).quick_vertices_transform(vertices, src_crs)
# Optimise the PlateCarree -> PlateCarree case where no
# wrapping or interpolation needs to take place.
if return_value is None and isinstance(src_crs, PlateCarree):
self_params = self.proj4_params.copy()
src_params = src_crs.proj4_params.copy()
self_params.pop('lon_0'), src_params.pop('lon_0')
xs, ys = vertices[:, 0], vertices[:, 1]
potential = (self_params == src_params and
self.y_limits[0] <= ys.min() and
self.y_limits[1] >= ys.max())
if potential:
mod = np.diff(src_crs.x_limits)[0]
bboxes, proj_offset = self._bbox_and_offset(src_crs)
x_lim = xs.min(), xs.max()
for poly in bboxes:
# Arbitrarily choose the number of moduli to look
# above and below the -180->180 range. If data is beyond
# this range, we're not going to transform it quickly.
for i in [-1, 0, 1, 2]:
offset = mod * i - proj_offset
if ((poly[0] + offset) <= x_lim[0] and
(poly[1] + offset) >= x_lim[1]):
return_value = vertices + [[-offset, 0]]
break
if return_value is not None:
break
return return_value
class TransverseMercator(Projection):
"""
A Transverse Mercator projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
scale_factor=1.0, globe=None):
"""
Parameters
----------
central_longitude: optional
The true longitude of the central meridian in degrees.
Defaults to 0.
central_latitude: optional
The true latitude of the planar origin in degrees. Defaults to 0.
false_easting: optional
X offset from the planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from the planar origin in metres. Defaults to 0.
scale_factor: optional
Scale factor at the central meridian. Defaults to 1.
globe: optional
An instance of :class:`cartopy.crs.Globe`. If omitted, a default
globe is created.
"""
proj4_params = [('proj', 'tmerc'), ('lon_0', central_longitude),
('lat_0', central_latitude), ('k', scale_factor),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
super(TransverseMercator, self).__init__(proj4_params, globe=globe)
@property
def threshold(self):
return 1e4
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return (-2e7, 2e7)
@property
def y_limits(self):
return (-1e7, 1e7)
class OSGB(TransverseMercator):
def __init__(self):
super(OSGB, self).__init__(central_longitude=-2, central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=Globe(datum='OSGB36', ellipse='airy'))
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (0, 7e5)
@property
def y_limits(self):
return (0, 13e5)
class OSNI(TransverseMercator):
def __init__(self):
globe = Globe(semimajor_axis=6377340.189,
semiminor_axis=6356034.447938534)
super(OSNI, self).__init__(central_longitude=-8,
central_latitude=53.5,
scale_factor=1.000035,
false_easting=200000,
false_northing=250000,
globe=globe)
@property
def boundary(self):
w = self.x_limits[1] - self.x_limits[0]
h = self.y_limits[1] - self.y_limits[0]
return sgeom.LineString([(0, 0), (0, h), (w, h), (w, 0), (0, 0)])
@property
def x_limits(self):
return (18814.9667, 386062.3293)
@property
def y_limits(self):
return (11764.8481, 464720.9559)
class UTM(Projection):
"""
Universal Transverse Mercator projection.
"""
def __init__(self, zone, southern_hemisphere=False, globe=None):
"""
Parameters
----------
zone
The numeric zone of the UTM required.
southern_hemisphere: optional
Set to True if the zone is in the southern hemisphere. Defaults to
False.
globe: optional
An instance of :class:`cartopy.crs.Globe`. If omitted, a default
globe is created.
"""
proj4_params = [('proj', 'utm'),
('units', 'm'),
('zone', zone)]
if southern_hemisphere:
proj4_params.append(('south', None))
super(UTM, self).__init__(proj4_params, globe=globe)
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def threshold(self):
return 1e2
@property
def x_limits(self):
easting = 5e5
# allow 50% overflow
return (0 - easting/2, 2 * easting + easting/2)
@property
def y_limits(self):
northing = 1e7
# allow 50% overflow
return (0 - northing, 2 * northing + northing/2)
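# Editor's sketch (not part of cartopy): UTM zone 33 is centred on 15E
# (central meridian = 6 * zone - 183). transform_point maps a geodetic
# lon/lat to metres within the zone, assuming a working PROJ installation.
if __name__ == '__main__':
    _utm = UTM(zone=33, southern_hemisphere=True)
    print(_utm.transform_point(15.0, -40.0, Geodetic()))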
class EuroPP(UTM):
"""
UTM Zone 32 projection for EuroPP domain.
Ellipsoid is International 1924, Datum is ED50.
"""
def __init__(self):
globe = Globe(ellipse='intl')
super(EuroPP, self).__init__(32, globe=globe)
@property
def x_limits(self):
return (-1.4e6, 2e6)
@property
def y_limits(self):
return (4e6, 7.9e6)
class Mercator(Projection):
"""
A Mercator projection.
"""
def __init__(self, central_longitude=0.0,
min_latitude=-80.0, max_latitude=84.0,
globe=None, latitude_true_scale=None,
false_easting=0.0, false_northing=0.0, scale_factor=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
min_latitude: optional
The maximum southerly extent of the projection. Defaults
to -80 degrees.
max_latitude: optional
The maximum northerly extent of the projection. Defaults
to 84 degrees.
globe: A :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
latitude_true_scale: optional
The latitude where the scale is 1. Defaults to 0 degrees.
false_easting: optional
X offset from the planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from the planar origin in metres. Defaults to 0.
scale_factor: optional
Scale factor at natural origin. Defaults to unused.
Notes
-----
Only one of ``latitude_true_scale`` and ``scale_factor`` should
be included.
"""
proj4_params = [('proj', 'merc'),
('lon_0', central_longitude),
('x_0', false_easting),
('y_0', false_northing),
('units', 'm')]
# If it's None, we don't pass it to Proj4, in which case its default
# of 0.0 will be used.
if latitude_true_scale is not None:
proj4_params.append(('lat_ts', latitude_true_scale))
if scale_factor is not None:
if latitude_true_scale is not None:
raise ValueError('It does not make sense to provide both '
'"scale_factor" and "latitude_true_scale". ')
else:
proj4_params.append(('k_0', scale_factor))
super(Mercator, self).__init__(proj4_params, globe=globe)
# Calculate limits.
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
limits = self.transform_points(Geodetic(),
np.array([minlon, maxlon]),
np.array([min_latitude, max_latitude]))
self._x_limits = tuple(limits[..., 0])
self._y_limits = tuple(limits[..., 1])
self._threshold = min(np.diff(self.x_limits)[0] / 720,
np.diff(self.y_limits)[0] / 360)
def __eq__(self, other):
res = super(Mercator, self).__eq__(other)
if hasattr(other, "_y_limits") and hasattr(other, "_x_limits"):
res = res and self._y_limits == other._y_limits and \
self._x_limits == other._x_limits
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self._x_limits, self._y_limits))
@property
def threshold(self):
return self._threshold
@property
def boundary(self):
x0, x1 = self.x_limits
y0, y1 = self.y_limits
return sgeom.LineString([(x0, y0), (x0, y1),
(x1, y1), (x1, y0),
(x0, y0)])
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
# Define a specific instance of a Mercator projection, the Google mercator.
Mercator.GOOGLE = Mercator(min_latitude=-85.0511287798066,
max_latitude=85.0511287798066,
globe=Globe(ellipse=None,
semimajor_axis=WGS84_SEMIMAJOR_AXIS,
semiminor_axis=WGS84_SEMIMAJOR_AXIS,
nadgrids='@null'))
# Deprecated form
GOOGLE_MERCATOR = Mercator.GOOGLE
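# Editor's sketch (not part of cartopy): the Google/Web Mercator instance
# above uses a sphere (semiminor == semimajor) and clips latitude at about
# 85.0511 degrees so that the resulting map extent is square.
if __name__ == '__main__':
    assert GOOGLE_MERCATOR is Mercator.GOOGLE
    print(Mercator.GOOGLE.x_limits, Mercator.GOOGLE.y_limits)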
class LambertCylindrical(_RectangularProjection):
def __init__(self, central_longitude=0.0):
proj4_params = [('proj', 'cea'), ('lon_0', central_longitude)]
globe = Globe(semimajor_axis=math.degrees(1))
super(LambertCylindrical, self).__init__(proj4_params, 180,
math.degrees(1), globe=globe)
@property
def threshold(self):
return 0.5
class LambertConformal(Projection):
"""
A Lambert Conformal conic projection.
"""
def __init__(self, central_longitude=-96.0, central_latitude=39.0,
false_easting=0.0, false_northing=0.0,
secant_latitudes=None, standard_parallels=None,
globe=None, cutoff=-30):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to -96.
central_latitude: optional
The central latitude. Defaults to 39.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
secant_latitudes: optional
Secant latitudes. This keyword is deprecated in v0.12 and directly
replaced by ``standard_parallels``. Defaults to None.
standard_parallels: optional
Standard parallel latitude(s). Defaults to (33, 45).
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
cutoff: optional
Latitude of map cutoff.
The map extends to infinity opposite the central pole
so we must cut off the map drawing before then.
A value of 0 will draw half the globe. Defaults to -30.
"""
proj4_params = [('proj', 'lcc'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if secant_latitudes and standard_parallels:
raise TypeError('standard_parallels replaces secant_latitudes.')
elif secant_latitudes is not None:
warnings.warn('secant_latitudes has been deprecated in v0.12. '
'The standard_parallels keyword can be used as a '
'direct replacement.')
standard_parallels = secant_latitudes
elif standard_parallels is None:
# The default. Put this as a keyword arg default once
# secant_latitudes is removed completely.
standard_parallels = (33, 45)
n_parallels = len(standard_parallels)
if not 1 <= n_parallels <= 2:
raise ValueError('1 or 2 standard parallels must be specified. '
'Got {} ({})'.format(n_parallels,
standard_parallels))
proj4_params.append(('lat_1', standard_parallels[0]))
if n_parallels == 2:
proj4_params.append(('lat_2', standard_parallels[1]))
super(LambertConformal, self).__init__(proj4_params, globe=globe)
# Compute whether this projection is at the "north pole" or the
# "south pole" (after the central lon/lat have been taken into
# account).
if n_parallels == 1:
plat = 90 if standard_parallels[0] > 0 else -90
else:
# Which pole are the parallels closest to? That is the direction
# that the cone converges.
if abs(standard_parallels[0]) > abs(standard_parallels[1]):
poliest_sec = standard_parallels[0]
else:
poliest_sec = standard_parallels[1]
plat = 90 if poliest_sec > 0 else -90
self.cutoff = cutoff
n = 91
lons = np.empty(n + 2)
lats = np.full(n + 2, cutoff)
lons[0] = lons[-1] = 0
lats[0] = lats[-1] = plat
if plat == 90:
# Ensure clockwise
lons[1:-1] = np.linspace(central_longitude + 180 - 0.001,
central_longitude - 180 + 0.001, n)
else:
lons[1:-1] = np.linspace(central_longitude - 180 + 0.001,
central_longitude + 180 - 0.001, n)
points = self.transform_points(PlateCarree(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
def __eq__(self, other):
res = super(LambertConformal, self).__eq__(other)
if hasattr(other, "cutoff"):
res = res and self.cutoff == other.cutoff
return res
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.proj4_init, self.cutoff))
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
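# Editor's sketch (not part of cartopy): with one standard parallel the
# cone converges to the nearer pole, per the `plat` logic above; with two,
# the more poleward parallel decides.
if __name__ == '__main__':
    _lcc = LambertConformal(standard_parallels=(33, 45))
    print(_lcc.proj4_init)
    print(_lcc.x_limits, _lcc.y_limits)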
class LambertAzimuthalEqualArea(Projection):
"""
A Lambert Azimuthal Equal-Area projection.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
central_latitude: optional
The central latitude. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'laea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
super(LambertAzimuthalEqualArea, self).__init__(proj4_params,
globe=globe)
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
# Find the antipode, and shift it a small amount in latitude to
# approximate the extent of the projection:
lon = central_longitude + 180
sign = np.sign(central_latitude) or 1
lat = -central_latitude + sign * 0.01
x, max_y = self.transform_point(lon, lat, PlateCarree())
coords = _ellipse_boundary(a * 1.9999, max_y - false_northing,
false_easting, false_northing, 61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Miller(_RectangularProjection):
def __init__(self, central_longitude=0.0, globe=None):
if globe is None:
globe = Globe(semimajor_axis=math.degrees(1), ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = np.float(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(globe.semiminor_axis or a)
if b != a or globe.ellipse is not None:
warnings.warn('The proj "mill" projection does not handle '
'elliptical globes.')
proj4_params = [('proj', 'mill'), ('lon_0', central_longitude)]
# See Snyder, 1987. Eqs (11-1) and (11-2) substituting maximums of
# (lambda-lambda0)=180 and phi=90 to get limits.
super(Miller, self).__init__(proj4_params,
a * np.pi, a * 2.303412543376391,
globe=globe)
@property
def threshold(self):
return 0.5
class RotatedPole(_CylindricalProjection):
"""
A rotated latitude/longitude projected coordinate system
with cylindrical topology and projected distance.
Coordinates are measured in projection metres.
The class uses proj to perform an ob_tran operation, using the
pole_longitude to set a lon_0 then performing two rotations based on
pole_latitude and central_rotated_longitude.
This is equivalent to setting the new pole to a location defined by
the pole_latitude and pole_longitude values in the GeogCRS defined by
globe, then rotating this new CRS about its pole using the
central_rotated_longitude value.
"""
def __init__(self, pole_longitude=0.0, pole_latitude=90.0,
central_rotated_longitude=0.0, globe=None):
"""
Parameters
----------
pole_longitude: optional
Pole longitude position, in unrotated degrees. Defaults to 0.
pole_latitude: optional
Pole latitude position, in unrotated degrees. Defaults to 90.
central_rotated_longitude: optional
Longitude rotation about the new pole, in degrees. Defaults to 0.
globe: optional
An optional :class:`cartopy.crs.Globe`. Defaults to a "WGS84"
datum.
"""
proj4_params = [('proj', 'ob_tran'), ('o_proj', 'latlon'),
('o_lon_p', central_rotated_longitude),
('o_lat_p', pole_latitude),
('lon_0', 180 + pole_longitude),
('to_meter', math.radians(1))]
super(RotatedPole, self).__init__(proj4_params, 180, 90, globe=globe)
@property
def threshold(self):
return 0.5
class Gnomonic(Projection):
def __init__(self, central_latitude=0.0,
central_longitude=0.0, globe=None):
proj4_params = [('proj', 'gnom'), ('lat_0', central_latitude),
('lon_0', central_longitude)]
super(Gnomonic, self).__init__(proj4_params, globe=globe)
self._max = 5e7
@property
def boundary(self):
return sgeom.Point(0, 0).buffer(self._max).exterior
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return (-self._max, self._max)
@property
def y_limits(self):
return (-self._max, self._max)
class Stereographic(Projection):
def __init__(self, central_latitude=0.0, central_longitude=0.0,
false_easting=0.0, false_northing=0.0,
true_scale_latitude=None,
scale_factor=None, globe=None):
# Warn when using Stereographic with proj < 5.0.0 due to
# incorrect transformation with lon_0=0 (see
# https://github.com/OSGeo/proj.4/issues/194).
if central_latitude == 0:
if PROJ4_VERSION != ():
if PROJ4_VERSION < (5, 0, 0):
warnings.warn(
'The Stereographic projection in Proj older than '
'5.0.0 incorrectly transforms points when '
'central_latitude=0. Use this projection with '
'caution.')
else:
warnings.warn(
'Cannot determine Proj version. The Stereographic '
'projection may be unreliable and should be used with '
'caution.')
proj4_params = [('proj', 'stere'), ('lat_0', central_latitude),
('lon_0', central_longitude),
('x_0', false_easting), ('y_0', false_northing)]
if true_scale_latitude is not None:
if central_latitude not in (-90., 90.):
warnings.warn('"true_scale_latitude" parameter is only used '
'for polar stereographic projections. Consider '
'the use of "scale_factor" instead.')
proj4_params.append(('lat_ts', true_scale_latitude))
if scale_factor is not None:
if true_scale_latitude is not None:
raise ValueError('It does not make sense to provide both '
'"scale_factor" and "true_scale_latitude".')
else:
proj4_params.append(('k_0', scale_factor))
super(Stereographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)
# Note: The magic number has been picked to maintain consistent
# behaviour with a wgs84 globe. There is no guarantee that the scaling
# should even be linear.
x_axis_offset = 5e7 / WGS84_SEMIMAJOR_AXIS
y_axis_offset = 5e7 / WGS84_SEMIMINOR_AXIS
self._x_limits = (-a * x_axis_offset + false_easting,
a * x_axis_offset + false_easting)
self._y_limits = (-b * y_axis_offset + false_northing,
b * y_axis_offset + false_northing)
coords = _ellipse_boundary(self._x_limits[1], self._y_limits[1],
false_easting, false_northing, 91)
self._boundary = sgeom.LinearRing(coords.T)
self._threshold = np.diff(self._x_limits)[0] * 1e-3
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class NorthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, true_scale_latitude=None,
globe=None):
super(NorthPolarStereo, self).__init__(
central_latitude=90,
central_longitude=central_longitude,
true_scale_latitude=true_scale_latitude, # None is +90
globe=globe)
class SouthPolarStereo(Stereographic):
def __init__(self, central_longitude=0.0, true_scale_latitude=None,
globe=None):
super(SouthPolarStereo, self).__init__(
central_latitude=-90,
central_longitude=central_longitude,
true_scale_latitude=true_scale_latitude, # None is -90
globe=globe)
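# Editor's sketch (not part of cartopy): the polar aliases above just pin
# central_latitude at +/-90, so the pole maps to the planar origin.
if __name__ == '__main__':
    _nps = NorthPolarStereo(true_scale_latitude=70)
    print(_nps.transform_point(0.0, 90.0, Geodetic()))  # ~(0, 0)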
class Orthographic(Projection):
def __init__(self, central_longitude=0.0, central_latitude=0.0,
globe=None):
if PROJ4_VERSION != ():
if (5, 0, 0) <= PROJ4_VERSION < (5, 1, 0):
warnings.warn(
'The Orthographic projection in Proj between 5.0.0 and '
'5.1.0 incorrectly transforms points. Use this projection '
'with caution.')
else:
warnings.warn(
'Cannot determine Proj version. The Orthographic projection '
'may be unreliable and should be used with caution.')
proj4_params = [('proj', 'ortho'), ('lon_0', central_longitude),
('lat_0', central_latitude)]
super(Orthographic, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
if b != a:
warnings.warn('The proj "ortho" projection does not appear to '
'handle elliptical globes.')
# To stabilise the projection of geometries, we reduce the boundary by
# a tiny fraction at the cost of the extreme edges.
coords = _ellipse_boundary(a * 0.99999, b * 0.99999, n=61)
self._boundary = sgeom.polygon.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = np.diff(self._x_limits)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _WarpedRectangularProjection(six.with_metaclass(ABCMeta, Projection)):
def __init__(self, proj4_params, central_longitude,
false_easting=None, false_northing=None, globe=None):
if false_easting is not None:
proj4_params += [('x_0', false_easting)]
if false_northing is not None:
proj4_params += [('y_0', false_northing)]
super(_WarpedRectangularProjection, self).__init__(proj4_params,
globe=globe)
# Obtain boundary points
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
n = 91
lon = np.empty(2 * n + 1)
lat = np.empty(2 * n + 1)
lon[:n] = minlon
lat[:n] = np.linspace(-90, 90, n)
lon[n:2 * n] = maxlon
lat[n:2 * n] = np.linspace(90, -90, n)
lon[-1] = minlon
lat[-1] = -90
points = self.transform_points(self.as_geodetic(), lon, lat)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _Eckert(six.with_metaclass(ABCMeta, _WarpedRectangularProjection)):
"""
An Eckert projection.
This class implements all the methods common to the Eckert family of
projections.
"""
def __init__(self, central_longitude=0, false_easting=None,
false_northing=None, globe=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "{}" projection does not handle '
'elliptical globes.'.format(self._proj_name))
proj4_params = [('proj', self._proj_name),
('lon_0', central_longitude)]
super(_Eckert, self).__init__(proj4_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e5
class EckertI(_Eckert):
"""
An Eckert I projection.
This projection is pseudocylindrical, but not equal-area. Both meridians
and parallels are straight lines. Its equal-area pair is :class:`EckertII`.
"""
_proj_name = 'eck1'
class EckertII(_Eckert):
"""
An Eckert II projection.
This projection is pseudocylindrical, and equal-area. Both meridians and
parallels are straight lines. Its non-equal-area pair with equally-spaced
parallels is :class:`EckertI`.
"""
_proj_name = 'eck2'
class EckertIII(_Eckert):
"""
An Eckert III projection.
This projection is pseudocylindrical, but not equal-area. Parallels are
equally-spaced straight lines, while meridians are elliptical arcs up to
semicircles on the edges. Its equal-area pair is :class:`EckertIV`.
"""
_proj_name = 'eck3'
class EckertIV(_Eckert):
"""
An Eckert IV projection.
This projection is pseudocylindrical, and equal-area. Parallels are
unequally-spaced straight lines, while meridians are elliptical arcs up to
semicircles on the edges. Its non-equal-area pair with equally-spaced
parallels is :class:`EckertIII`.
It is commonly used for world maps.
"""
_proj_name = 'eck4'
class EckertV(_Eckert):
"""
An Eckert V projection.
This projection is pseudocylindrical, but not equal-area. Parallels are
equally-spaced straight lines, while meridians are sinusoidal arcs. Its
equal-area pair is :class:`EckertVI`.
"""
_proj_name = 'eck5'
class EckertVI(_Eckert):
"""
An Eckert VI projection.
This projection is pseudocylindrical, and equal-area. Parallels are
unequally-spaced straight lines, while meridians are sinusoidal arcs. Its
non-equal-area pair with equally-spaced parallels is :class:`EckertV`.
It is commonly used for world maps.
"""
_proj_name = 'eck6'
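# Editor's sketch (not part of cartopy): the six Eckert classes differ only
# in the PROJ name handed to the shared _Eckert constructor.
if __name__ == '__main__':
    for _cls in (EckertI, EckertII, EckertIII, EckertIV, EckertV, EckertVI):
        print(_cls.__name__, _cls._proj_name)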
class EqualEarth(_WarpedRectangularProjection):
u"""
An Equal Earth projection.
This projection is pseudocylindrical, and equal area. Parallels are
unequally-spaced straight lines, while meridians are equally-spaced arcs.
It is intended for world maps.
Note
----
To use this projection, you must be using Proj 5.2.0 or newer.
References
----------
Bojan \u0160avri\u010d, Tom Patterson & Bernhard Jenny (2018) The Equal
Earth map projection, International Journal of Geographical Information
Science, DOI: 10.1080/13658816.2018.1504949
"""
def __init__(self, central_longitude=0, false_easting=None,
false_northing=None, globe=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
"""
if PROJ4_VERSION < (5, 2, 0):
raise ValueError('The EqualEarth projection requires Proj version '
'5.2.0, but you are using {}.'
.format('.'.join(str(v) for v in PROJ4_VERSION)))
proj_params = [('proj', 'eqearth'), ('lon_0', central_longitude)]
super(EqualEarth, self).__init__(proj_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e5
class Mollweide(_WarpedRectangularProjection):
"""
A Mollweide projection.
This projection is pseudocylindrical, and equal area. Parallels are
unequally-spaced straight lines, while meridians are elliptical arcs up to
semicircles on the edges. Poles are points.
It is commonly used for world maps, or interrupted with several central
meridians.
"""
def __init__(self, central_longitude=0, globe=None,
false_easting=None, false_northing=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "moll" projection does not handle '
'elliptical globes.')
proj4_params = [('proj', 'moll'), ('lon_0', central_longitude)]
super(Mollweide, self).__init__(proj4_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e5
class Robinson(_WarpedRectangularProjection):
"""
A Robinson projection.
This projection is pseudocylindrical, and a compromise that is neither
equal-area nor conformal. Parallels are unequally-spaced straight lines,
and meridians are curved lines of no particular form.
It is commonly used for "visually-appealing" world maps.
"""
def __init__(self, central_longitude=0, globe=None,
false_easting=None, false_northing=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
false_easting: float, optional
X offset from planar origin in metres. Defaults to 0.
false_northing: float, optional
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
# Warn when using Robinson with proj 4.8 due to discontinuity at
# 40 deg N introduced by incomplete fix to issue #113 (see
# https://github.com/OSGeo/proj.4/issues/113).
if PROJ4_VERSION != ():
if (4, 8) <= PROJ4_VERSION < (4, 9):
warnings.warn('The Robinson projection in the v4.8.x series '
'of Proj contains a discontinuity at '
'40 deg latitude. Use this projection with '
'caution.')
else:
warnings.warn('Cannot determine Proj version. The Robinson '
'projection may be unreliable and should be used '
'with caution.')
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "robin" projection does not handle '
'elliptical globes.')
proj4_params = [('proj', 'robin'), ('lon_0', central_longitude)]
super(Robinson, self).__init__(proj4_params, central_longitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
@property
def threshold(self):
return 1e4
def transform_point(self, x, y, src_crs):
"""
Capture and handle any input NaNs, else invoke parent function,
:meth:`_WarpedRectangularProjection.transform_point`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
Note
----
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
"""
if np.isnan(x) or np.isnan(y):
result = (np.nan, np.nan)
else:
result = super(Robinson, self).transform_point(x, y, src_crs)
return result
def transform_points(self, src_crs, x, y, z=None):
"""
Capture and handle NaNs in input points -- else as parent function,
:meth:`_WarpedRectangularProjection.transform_points`.
Needed because input NaNs can trigger a fatal error in the underlying
implementation of the Robinson projection.
Note
----
Although the original can in fact translate (nan, lat) into
(nan, y-value), this patched version doesn't support that.
Instead, we invalidate any of the points that contain a NaN.
"""
input_point_nans = np.isnan(x) | np.isnan(y)
if z is not None:
input_point_nans |= np.isnan(z)
handle_nans = np.any(input_point_nans)
if handle_nans:
# Remove NaN points from input data to avoid the error.
x[input_point_nans] = 0.0
y[input_point_nans] = 0.0
if z is not None:
z[input_point_nans] = 0.0
result = super(Robinson, self).transform_points(src_crs, x, y, z)
if handle_nans:
# Result always has shape (N, 3).
# Blank out each (whole) point where we had a NaN in the input.
result[input_point_nans] = np.nan
return result
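# Editor's sketch (not part of cartopy): demonstrating the NaN handling
# patched in above -- a NaN in any input coordinate invalidates the whole
# output point instead of crashing the underlying PROJ routine.
if __name__ == '__main__':
    _rob = Robinson()
    print(_rob.transform_point(np.nan, 10.0, PlateCarree()))  # (nan, nan)
    _pts = _rob.transform_points(PlateCarree(),
                                 np.array([0.0, np.nan]),
                                 np.array([0.0, 10.0]))
    print(_pts)  # the second row comes back all-NaN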
class InterruptedGoodeHomolosine(Projection):
def __init__(self, central_longitude=0, globe=None):
proj4_params = [('proj', 'igh'), ('lon_0', central_longitude)]
super(InterruptedGoodeHomolosine, self).__init__(proj4_params,
globe=globe)
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
epsilon = 1e-10
# Obtain boundary points
n = 31
top_interrupted_lons = (-40.0,)
bottom_interrupted_lons = (80.0, -20.0, -100.0)
lons = np.empty(
(2 + 2 * len(top_interrupted_lons + bottom_interrupted_lons)) * n +
1)
lats = np.empty(
(2 + 2 * len(top_interrupted_lons + bottom_interrupted_lons)) * n +
1)
end = 0
# Left boundary
lons[end:end + n] = minlon
lats[end:end + n] = np.linspace(-90, 90, n)
end += n
# Top boundary
for lon in top_interrupted_lons:
lons[end:end + n] = lon - epsilon + central_longitude
lats[end:end + n] = np.linspace(90, 0, n)
end += n
lons[end:end + n] = lon + epsilon + central_longitude
lats[end:end + n] = np.linspace(0, 90, n)
end += n
# Right boundary
lons[end:end + n] = maxlon
lats[end:end + n] = np.linspace(90, -90, n)
end += n
# Bottom boundary
for lon in bottom_interrupted_lons:
lons[end:end + n] = lon + epsilon + central_longitude
lats[end:end + n] = np.linspace(-90, 0, n)
end += n
lons[end:end + n] = lon - epsilon + central_longitude
lats[end:end + n] = np.linspace(0, -90, n)
end += n
# Close loop
lons[-1] = minlon
lats[-1] = -90
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 2e4
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _Satellite(Projection):
def __init__(self, projection, satellite_height=35785831,
central_longitude=0.0, central_latitude=0.0,
false_easting=0, false_northing=0, globe=None,
sweep_axis=None):
proj4_params = [('proj', projection), ('lon_0', central_longitude),
('lat_0', central_latitude), ('h', satellite_height),
('x_0', false_easting), ('y_0', false_northing),
('units', 'm')]
if sweep_axis:
proj4_params.append(('sweep', sweep_axis))
super(_Satellite, self).__init__(proj4_params, globe=globe)
def _set_boundary(self, coords):
self._boundary = sgeom.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = np.diff(self._x_limits)[0] * 0.02
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Geostationary(_Satellite):
"""
A view appropriate for satellites in Geostationary Earth orbit.
Perspective view looking directly down from above a point on the equator.
In this projection, the projected coordinates are scanning angles measured
from the satellite looking directly downward, multiplied by the height of
the satellite.
"""
def __init__(self, central_longitude=0.0, satellite_height=35785831,
false_easting=0, false_northing=0, globe=None,
sweep_axis='y'):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
satellite_height: float, optional
The height of the satellite. Defaults to 35785831 meters
(true geostationary orbit).
false_easting:
X offset from planar origin in metres. Defaults to 0.
false_northing:
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
sweep_axis: 'x' or 'y', optional. Defaults to 'y'.
Controls which axis is scanned first, and thus which angle is
applied first. The default is appropriate for Meteosat, while
'x' should be used for GOES.
"""
super(Geostationary, self).__init__(
projection='geos',
satellite_height=satellite_height,
central_longitude=central_longitude,
central_latitude=0.0,
false_easting=false_easting,
false_northing=false_northing,
globe=globe,
sweep_axis=sweep_axis)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
h = np.float(satellite_height)
# These are only exact for a spherical Earth, owing to assuming a is
# constant. Handling elliptical would be much harder for this.
sin_max_th = a / (a + h)
tan_max_th = a / np.sqrt((a + h) ** 2 - a ** 2)
# Using Napier's rules for right spherical triangles
# See R2 and R6 (x and y coords are h * b and h * a, respectively):
# https://en.wikipedia.org/wiki/Spherical_trigonometry
t = np.linspace(0, -2 * np.pi, 61) # Clockwise boundary.
coords = np.vstack([np.arctan(tan_max_th * np.cos(t)),
np.arcsin(sin_max_th * np.sin(t))])
coords *= h
coords += np.array([[false_easting], [false_northing]])
self._set_boundary(coords)
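# Editor's sketch (not part of cartopy): a numeric check of the limb
# geometry above. For a spherical Earth of radius a viewed from height h,
# the maximum scanning angle satisfies sin(theta_max) = a / (a + h).
if __name__ == '__main__':
    _a = WGS84_SEMIMAJOR_AXIS
    _h = 35785831.0
    print(np.degrees(np.arcsin(_a / (_a + _h))))  # ~8.7 deg for GEO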
class NearsidePerspective(_Satellite):
"""
Perspective view looking directly down from above a point on the globe.
In this projection, the projected coordinates are x and y measured from
the origin of a plane tangent to the Earth directly below the perspective
point (e.g. a satellite).
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
satellite_height=35785831,
false_easting=0, false_northing=0, globe=None):
"""
Parameters
----------
central_longitude: float, optional
The central longitude. Defaults to 0.
central_latitude: float, optional
The central latitude. Defaults to 0.
satellite_height: float, optional
The height of the satellite. Defaults to 35785831 meters
(true geostationary orbit).
false_easting:
X offset from planar origin in metres. Defaults to 0.
false_northing:
Y offset from planar origin in metres. Defaults to 0.
globe: :class:`cartopy.crs.Globe`, optional
If omitted, a default globe is created.
.. note::
This projection does not handle elliptical globes.
"""
if globe is None:
globe = Globe(semimajor_axis=WGS84_SEMIMAJOR_AXIS, ellipse=None)
# TODO: Let the globe return the semimajor axis always.
a = globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS
b = globe.semiminor_axis or a
if b != a or globe.ellipse is not None:
warnings.warn('The proj "nsper" projection does not handle '
'elliptical globes.')
super(NearsidePerspective, self).__init__(
projection='nsper',
satellite_height=satellite_height,
central_longitude=central_longitude,
central_latitude=central_latitude,
false_easting=false_easting,
false_northing=false_northing,
globe=globe)
h = np.float(satellite_height)
max_x = a * np.sqrt(h / (2 * a + h))
coords = _ellipse_boundary(max_x, max_x,
false_easting, false_northing, 61)
self._set_boundary(coords)
class AlbersEqualArea(Projection):
"""
An Albers Equal Area projection
This projection is conic and equal-area, and is commonly used for maps of
the conterminous United States.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
standard_parallels=(20.0, 50.0), globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
central_latitude: optional
The central latitude. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
standard_parallels: optional
The one or two latitudes of correct scale. Defaults to (20, 50).
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'aea'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if standard_parallels is not None:
try:
proj4_params.append(('lat_1', standard_parallels[0]))
try:
proj4_params.append(('lat_2', standard_parallels[1]))
except IndexError:
pass
except TypeError:
proj4_params.append(('lat_1', standard_parallels))
super(AlbersEqualArea, self).__init__(proj4_params, globe=globe)
# bounds
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
n = 103
lons = np.empty(2 * n + 1)
lats = np.empty(2 * n + 1)
tmp = np.linspace(minlon, maxlon, n)
lons[:n] = tmp
lats[:n] = 90
lons[n:-1] = tmp[::-1]
lats[n:-1] = -90
lons[-1] = lons[0]
lats[-1] = lats[0]
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class AzimuthalEquidistant(Projection):
"""
An Azimuthal Equidistant projection
This projection provides accurate angles about and distances through the
central position. Other angles, distances, or areas may be distorted.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
globe=None):
"""
Parameters
----------
central_longitude: optional
The true longitude of the central meridian in degrees.
Defaults to 0.
central_latitude: optional
The true latitude of the planar origin in degrees.
Defaults to 0.
false_easting: optional
X offset from the planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from the planar origin in metres. Defaults to 0.
globe: optional
An instance of :class:`cartopy.crs.Globe`. If omitted, a default
globe is created.
"""
# Warn when using Azimuthal Equidistant with proj < 4.9.2 due to
# incorrect transformation past 90 deg distance (see
# https://github.com/OSGeo/proj.4/issues/246).
if PROJ4_VERSION != ():
if PROJ4_VERSION < (4, 9, 2):
warnings.warn('The Azimuthal Equidistant projection in Proj '
'older than 4.9.2 incorrectly transforms points '
'farther than 90 deg from the origin. Use this '
'projection with caution.')
else:
warnings.warn('Cannot determine Proj version. The Azimuthal '
'Equidistant projection may be unreliable and '
'should be used with caution.')
proj4_params = [('proj', 'aeqd'), ('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting), ('y_0', false_northing)]
super(AzimuthalEquidistant, self).__init__(proj4_params, globe=globe)
# TODO: Let the globe return the semimajor axis always.
a = np.float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
b = np.float(self.globe.semiminor_axis or a)
coords = _ellipse_boundary(a * np.pi, b * np.pi,
false_easting, false_northing, 61)
self._boundary = sgeom.LinearRing(coords.T)
mins = np.min(coords, axis=1)
maxs = np.max(coords, axis=1)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class Sinusoidal(Projection):
"""
A Sinusoidal projection.
This projection is equal-area.
"""
def __init__(self, central_longitude=0.0, false_easting=0.0,
false_northing=0.0, globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'sinu'),
('lon_0', central_longitude),
('x_0', false_easting),
('y_0', false_northing)]
super(Sinusoidal, self).__init__(proj4_params, globe=globe)
# Obtain boundary points
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
n = 91
lon = np.empty(2 * n + 1)
lat = np.empty(2 * n + 1)
lon[:n] = minlon
lat[:n] = np.linspace(-90, 90, n)
lon[n:2 * n] = maxlon
lat[n:2 * n] = np.linspace(90, -90, n)
lon[-1] = minlon
lat[-1] = -90
points = self.transform_points(self.as_geodetic(), lon, lat)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
self._threshold = max(np.abs(self.x_limits + self.y_limits)) * 1e-5
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return self._threshold
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
# MODIS data products use a Sinusoidal projection of a spherical Earth
# http://modis-land.gsfc.nasa.gov/GCTP.html
Sinusoidal.MODIS = Sinusoidal(globe=Globe(ellipse=None,
semimajor_axis=6371007.181,
semiminor_axis=6371007.181))
class EquidistantConic(Projection):
"""
An Equidistant Conic projection.
This projection is conic and equidistant, and the scale is true along all
meridians and along one or two specified standard parallels.
"""
def __init__(self, central_longitude=0.0, central_latitude=0.0,
false_easting=0.0, false_northing=0.0,
standard_parallels=(20.0, 50.0), globe=None):
"""
Parameters
----------
central_longitude: optional
The central longitude. Defaults to 0.
central_latitude: optional
The true latitude of the planar origin in degrees. Defaults to 0.
false_easting: optional
X offset from planar origin in metres. Defaults to 0.
false_northing: optional
Y offset from planar origin in metres. Defaults to 0.
standard_parallels: optional
The one or two latitudes of correct scale. Defaults to (20, 50).
globe: optional
A :class:`cartopy.crs.Globe`. If omitted, a default globe is
created.
"""
proj4_params = [('proj', 'eqdc'),
('lon_0', central_longitude),
('lat_0', central_latitude),
('x_0', false_easting),
('y_0', false_northing)]
if standard_parallels is not None:
try:
proj4_params.append(('lat_1', standard_parallels[0]))
try:
proj4_params.append(('lat_2', standard_parallels[1]))
except IndexError:
pass
except TypeError:
proj4_params.append(('lat_1', standard_parallels))
super(EquidistantConic, self).__init__(proj4_params, globe=globe)
# bounds
n = 103
lons = np.empty(2 * n + 1)
lats = np.empty(2 * n + 1)
minlon, maxlon = self._determine_longitude_bounds(central_longitude)
tmp = np.linspace(minlon, maxlon, n)
lons[:n] = tmp
lats[:n] = 90
lons[n:-1] = tmp[::-1]
lats[n:-1] = -90
lons[-1] = lons[0]
lats[-1] = lats[0]
points = self.transform_points(self.as_geodetic(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
@property
def boundary(self):
return self._boundary
@property
def threshold(self):
return 1e5
@property
def x_limits(self):
return self._x_limits
@property
def y_limits(self):
return self._y_limits
class _BoundaryPoint(object):
def __init__(self, distance, kind, data):
"""
A representation for a geometric object which is
connected to the boundary.
Parameters
----------
distance: float
The distance along the boundary that this object
can be found.
kind: bool
Whether this object represents a point from the
pre-computed boundary.
data: point or namedtuple
The actual data that this boundary object represents.
"""
self.distance = distance
self.kind = kind
self.data = data
def __repr__(self):
return '_BoundaryPoint(%r, %r, %s)' % (self.distance, self.kind,
self.data)
def _find_first_ge(a, x):
for v in a:
if v.distance >= x:
return v
# We've gone all the way around, so pick the first point again.
return a[0]
def epsg(code):
"""
Return the projection which corresponds to the given EPSG code.
The EPSG code must correspond to a "projected coordinate system",
so EPSG codes such as 4326 (WGS-84) which define a "geodetic coordinate
system" will not work.
Note
----
The conversion is performed by querying https://epsg.io/ so a
live internet connection is required.
"""
import cartopy._epsg
return cartopy._epsg._EPSGProjection(code)
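# Editor's sketch (not part of cartopy): epsg() needs a projected CRS code
# and a live internet connection; 27700 (British National Grid) is one
# such code, whereas a geodetic code like 4326 fails as documented above.
if __name__ == '__main__':
    _bng = epsg(27700)
    print(_bng.x_limits, _bng.y_limits)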
| lgpl-3.0 |
jskDr/jamespy_py3 | medic/kdl_cl.py | 1 | 40483 | """
KDL - deep learning for medic
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import convolve2d, fftconvolve
from sklearn import preprocessing, model_selection, metrics
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras import backend as K
from keras import callbacks
import kkeras
def fig2array(fig):
fig.canvas.draw()
# np.fromstring is deprecated for binary input; frombuffer is equivalent.
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# ones_255 = np.ones_like( data) * 255
# data = 255 - data
return data
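# Editor's usage sketch (assumption: a working matplotlib backend): render
# an empty figure and confirm fig2array returns an (H, W, 3) uint8 array.
if __name__ == '__main__':
    _fig, _ax = plt.subplots(figsize=(2, 2))
    _arr = fig2array(_fig)
    print(_arr.shape, _arr.dtype)  # e.g. (200, 200, 3) uint8 at dpi=100
    plt.close(_fig)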
def _gen_cell_r0(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r1(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell sizes vary more widely, but the mean is around 9-12 um.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
# print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r2(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell sizes vary more widely, but the mean is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_db_r0(N=5, rand_pos_cell=False, disp=False):
db_l = []
cell_img_org = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
for i in range(N):
if disp == 2:
print(i, end=",")
elif disp: # 1, True (not 0 or False)
print('Iteration:', i)
if rand_pos_cell:
cell_img = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(bd_on=True, rand_pos_cell=rand_pos_cell)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
print("The end.")
return db_l
def gen_cell_db(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd = {'mean': 5, 'std': 1}
else:
stat_ext_bd = None
db_l = []
cell_img_org = gen_cell(bd_on=False,
rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
for i in range(N):
if disp:
print(i, end=",")
if rand_pos_cell:
cell_img = gen_cell(
bd_on=False, rand_pos_cell=rand_pos_cell,
max_bd=max_bd,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(
bd_on=True, rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db(db_l, fname_gz="sheet.gz/cell_db.cvs.gz"):
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["celltype"] = celltype
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
# ===================================
# Functions for the Center_Cell mode
# - gen_cell_n_beads,
# gen_cell_db_center_cell,
# save_cell_db_center_cell
# ===================================
def gen_cell(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell sizes vary more widely, but the mean is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
return gen_cell_n_beads(bd_on=bd_on,
rand_pos_cell=rand_pos_cell,
r_cell=r_cell, # 0<r_cell<=1
r_bd=r_bd, # 0<r_bd<=1
max_bd=max_bd, # max_bd >= 1
rand_bead_flag=True, # This is the only changed part.
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd=stat_ext_bd,
bound_flag=bound_flag,
visible=visible,
disp=disp,
fig=fig,
ax=ax)
def gen_cell_n_beads(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell sizes vary more widely, but the mean is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd)+1
else:
# Now, the number of total beads attached a cell is fixed (not random).
final_max_bd = max_bd
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
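# Editor's usage sketch: generate one synthetic cell image with a random
# number of attached beads (at most max_bd) plus background beads.
if __name__ == '__main__':
    _img = gen_cell(bd_on=True, rand_pos_cell=True, max_bd=3)
    print(_img.shape)  # (H, W, 3) uint8 array produced via fig2array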
def gen_cell_db_center_cell(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd={'mean':5, 'std':1}
else:
stat_ext_bd=None
db_l = []
for i in range(N):
if disp:
print(i, end=",")
# n_beads cycles from 0 to max_bd-1,
# hence gen_cell_n_beads is used to fix the bead count per image.
n_beads = i % max_bd
cellbd_img = gen_cell_n_beads(
bd_on=True,
rand_pos_cell=rand_pos_cell,
max_bd=n_beads, # max_bd is repeated from 0 to max_bd
rand_bead_flag=False,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db_center_cell(db_l, max_bd, fname_gz="sheet.gz/cell_db.cvs.gz"):
"""
Each image includes a cell at the center location, and
the number of beads per cell is distributed equally by cycling.
That is, 0 beads, 1 bead, ..., max_bd-1 beads are repeated
across the images.
"""
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["n_beads"] = [i % max_bd] * np.prod(db.shape)
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
def gen_save_cell_db(N=5, fname_gz="sheet.gz/cell_db.cvs.gz",
extra_bead_on=True, rand_pos_cell=False,
max_bd=3,
classification_mode="Cancer_Normal_Cell",
disp=False):
"""
- Image display without pausing is needed. (Oct 31, 2016)
Parameters
==========
rand_pos_cell, Default=False
If True, the position of the cell is varied;
otherwise, the position is fixed at the center (0,0).
max_bd, int, default=3
The maximum number of beads attached to a cell.
classification_mode, string, default="Cancer_Normal_Cell"
If "Cancer_Normal_Cell", this function classifies cancer versus normal cells.
If "Center_Cell", this function classifies the number of beads in each cell.
In this case, the bead counts in cells are equally distributed
from 0 to max_bd-1. For example, if N=100 and max_bd=4, 0-bead,
1-bead, 2-bead and 3-bead cell images are each repeated 25 times.
"""
def save(save_fn, db_l, max_bd=None, fname_gz=None):
fname_gz_fold, fname_gz_file = os.path.split(fname_gz)
os.makedirs(fname_gz_fold, exist_ok=True)
if max_bd is None:
cell_df = save_fn(db_l, fname_gz=fname_gz)
else:
cell_df = save_fn(db_l, max_bd, fname_gz=fname_gz)
return cell_df
if classification_mode == "Cancer_Normal_Cell":
db_l = gen_cell_db(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
# classification_mode=classification_mode,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db, db_l, fname_gz=fname_gz)
elif classification_mode == "Center_Cell":
assert int(N % max_bd) == 0, "N % max_bd should be zero in the Center_Cell mode"
db_l = gen_cell_db_center_cell(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db_center_cell, db_l, max_bd,
fname_gz=fname_gz)
else:
raise ValueError("classification_mode = {} is not supported.".format(classification_mode))
return cell_df
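# --- Usage sketch (hypothetical filename; not part of the original module) ---
# In "Center_Cell" mode N must be divisible by max_bd, since bead counts
# 0..max_bd-1 are cycled evenly across the N images.
def _demo_gen_save_cell_db():
    cell_df = gen_save_cell_db(N=8,
                               fname_gz="sheet.gz/cell_db_demo.cvs.gz",
                               rand_pos_cell=False,
                               max_bd=4,
                               classification_mode="Center_Cell",
                               disp=False)
    return cell_df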
class obj:
def __init__(self, r, L=144):
"""
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies with larger variance,
but the mean is around 9-12 um.
"""
# Initial values
self.Lx, self.Ly = L, L
self.downsamples = 4
self.d_um = 2.2 / self.downsamples
# Input and generated values
self.r = r
self.r_pixels_x = self.r * self.Lx
self.r_pixels_y = self.r * self.Ly
self.r_x_um = self.r_pixels_x * self.d_um
self.r_y_um = self.r_pixels_y * self.d_um
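# --- Usage sketch ---
# obj converts a normalized radius r (a fraction of the image side) into pixel
# and micrometer units, using the 144-pixel side, 2.2 um camera pixels, and 4x
# oversampling set in __init__.
def _demo_obj():
    cell = obj(r=0.1)  # radius is 10% of the image width
    # 0.1 * 144 = 14.4 pixels; 14.4 * (2.2 / 4) = 7.92 um
    print(cell.r_pixels_x, cell.r_x_um)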
def get_h2d(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
2D Fresnel diffraction kernel.
Input
=====
nx, ny, int
grid size in x and y
l, float
wavelength (lambda)
z, float
propagation distance (height)
dx, dy, float
sampling pitch in x and y
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
x = np.dot(np.ones((ny, 1)), x_vec) # shape (ny, nx)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
y = y_vec * np.ones((1, nx)) # shape (ny, nx)
#k = 2.0 * np.pi / l
return np.exp(1j * k * z) / (1j * l * z) * np.exp((1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
def get_h2d_inv(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
2D inverse Fresnel diffraction kernel.
Input
=====
nx, ny, int
grid size in x and y
l, float
wavelength (lambda)
z, float
propagation distance (height)
dx, dy, float
sampling pitch in x and y
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
x = np.dot(np.ones((ny, 1)), x_vec) # shape (ny, nx)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
y = y_vec * np.ones((1, nx)) # shape (ny, nx)
#k = 2.0 * np.pi / l
return np.exp(-1j * k * z) / (1j * l * z) * np.exp((-1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
# Fresnel
def get_h(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
2D Fresnel diffraction kernel (paraxial form with unit-aware arguments).
Input
=====
ny, nx, int
grid size in y and x
z_mm, float
propagation distance in mm
dx_um, dy_um, float
sampling pitch in um
l_nm, float
wavelength in nm
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies with larger variance, but the mean is around 9-12 um.
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
def get_h_inv(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
2D inverse Fresnel diffraction kernel (paraxial form with unit-aware arguments).
Input
=====
ny, nx, int
grid size in y and x
z_mm, float
propagation distance in mm
dx_um, dy_um, float
sampling pitch in um
l_nm, float
wavelength in nm
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies with larger variance, but the mean is around 9-12 um.
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((-1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
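# --- Usage sketch ---
# Build forward/inverse Fresnel kernels on a 144x144 grid and check that they
# are complex conjugates of each other, as the expressions above imply.
def _demo_fresnel_kernels():
    h = get_h(144, 144, z_mm=0.5, dx_um=2.2 / 4, dy_um=2.2 / 4, l_nm=405)
    h_inv = get_h_inv(144, 144, z_mm=0.5, dx_um=2.2 / 4, dy_um=2.2 / 4, l_nm=405)
    assert np.allclose(h_inv, np.conj(h))
    print(h.shape, h.dtype)  # (144, 144) complex128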
def fd_conv(Img_xy, h2d, mode ='same'):
#return convolve2d(Img_xy, h2d, mode=mode)
return fftconvolve(Img_xy, h2d, mode=mode)
def cell_fd_info(cell_df):
Lx = cell_df['x'].max() + 1
Ly = cell_df['y'].max() + 1
Limg = cell_df['ID'].max() + 1
#print( Lx, Ly, Limg)
return Limg, Lx, Ly
def cell_fd_conv(cell_df, h144=None):
Limg, Lx, Ly = cell_fd_info(cell_df)
if h144 is None:
h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)
cell_img_fd_l = []
for l in range(Limg):
cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
#cell_img_fd = fd_conv(cell_img, h144)
cell_img_fd = fftconvolve(cell_img, h144, mode='same')
cell_img_fd_l.append(cell_img_fd)
cell_img_fd_a = np.array(cell_img_fd_l)
#print( cell_img_fd_a.shape)
return cell_img_fd_a
def cell_fd_extention(fname_org='sheet.gz/cell_db.cvs.gz', camera_bit_resolution=14):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
def cell_fd_ext_save(fname_org='sheet.gz/cell_db100.cvs.gz',
fname_ext='sheet.gz/cell_fd_db100.cvs.gz'):
cell_df_ext = cell_fd_extention(fname_org)
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
class CELL_FD_EXT():
def __init__(self, fname_org, h2d=None, h2d_inv=None):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
if h2d is None:
h2d = get_h(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
if h2d_inv is None:
h2d_inv = get_h_inv(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
self.fname_org = fname_org
self.h2d = h2d
self.h2d_inv = h2d_inv
def save(self):
fname_org = self.fname_org
fname_ext = fname_org[:-7] + '_fd' + fname_org[-7:]
print('fname_ext is', fname_ext)
cell_df_ext = self.extention()
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
def extention(self, camera_bit_resolution=14):
fname_org = self.fname_org
h2d = self.h2d
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df, h2d)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
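# --- Usage sketch (assumes 'sheet.gz/cell_db100.cvs.gz' was produced earlier
# by gen_save_cell_db) ---
# CELL_FD_EXT reads a saved cell database, applies the Fresnel-diffraction
# convolution, and writes an extended CSV with '_fd' inserted in the filename.
def _demo_cell_fd_ext():
    ext = CELL_FD_EXT('sheet.gz/cell_db100.cvs.gz')
    return ext.save()  # writes 'sheet.gz/cell_db100_fd.cvs.gz'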
#Deep Learning
def _run_dl_mgh_params_r0(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
#Deep Learning
def run_dl_mgh_params(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
def run_dl_mgh_params_2cl(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Convolution2D(5, 5, 5, border_mode='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
def run_dl_mgh_params_2cl_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
"""
Dropout is also included after batch normalization to protect
against overfitting.
"""
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Convolution2D(5, 5, 5, border_mode='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
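# --- Usage sketch (assumes the legacy Keras 1.x-style API used above, plus the
# kkeras plotting helpers imported by this module) ---
# X holds flattened Lx*Ly images, one row per sample; y holds integer labels.
def _demo_run_dl():
    Lx = Ly = 144
    X = np.random.random((40, Lx * Ly))
    y = np.random.randint(0, 2, size=40)
    run_dl_mgh_params_2cl_do(X, y, Lx, Ly, nb_epoch=2, batch_size=8, nb_classes=2)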
"""
Fresenel Diffraction with a new approach
"""
def f(x_um, y_um, z_mm=0.5, l_nm=405):
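# Note: l_nm * z_mm is numerically in um^2 (1 nm * 1 mm = 1 um^2), so the
# ratio (x_um^2 + y_um^2) / (l_nm * z_mm) is dimensionless, as required.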
return np.exp(1j * np.pi * (np.power(x_um, 2) + np.power(y_um,2)) / (l_nm * z_mm))
def cimshow(f_impulse):
plt.figure(figsize=(7,5))
plt.subplot(2,2,1)
plt.imshow(np.real(f_impulse))
plt.colorbar()
plt.title('Re{}')
plt.subplot(2,2,2)
plt.imshow(np.imag(f_impulse))
plt.colorbar()
plt.title('Img{}')
plt.subplot(2,2,2+1)
plt.imshow(np.abs(f_impulse))
plt.colorbar()
plt.title('Magnitude')
plt.subplot(2,2,2+2)
plt.imshow(np.angle(f_impulse))
plt.colorbar()
plt.title('Phase')
def xy(MAX_x_um = 55, pixel_um=2.2, oversample_rate=4):
N = int(MAX_x_um / (pixel_um / oversample_rate))
x = np.dot(np.ones((N,1)), np.linspace(-MAX_x_um,MAX_x_um,N).reshape(1,-1))
y = np.dot(np.linspace(-MAX_x_um,MAX_x_um,N).reshape(-1,1), np.ones((1,N)))
return x, y
def u(x, y, alpha):
out = np.zeros_like(x)
out[(y>=-alpha/2)&(y<=alpha/2)&(x>=-alpha/2)&(x<=alpha/2)] = 1.0
return out
def u_circle(x,y,radius):
xy2 = np.power(x,2) + np.power(y,2)
# Since x is already a matrix from gridding, the shape of out is copied from x.
# If x were a vector, the shape of out would need to be redefined as 2-D.
out = np.zeros_like(x)
out[xy2<=np.power(radius,2)] = 1.0
return out
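# --- Usage sketch ---
# Build an oversampled coordinate grid, place a circular aperture of 5 um
# radius on it, and multiply by the quadratic Fresnel phase f() defined above.
def _demo_circular_aperture():
    x, y = xy(MAX_x_um=55, pixel_um=2.2, oversample_rate=4)  # 100x100 grid
    aperture = u_circle(x, y, radius=5.0)
    field = aperture * f(x, y, z_mm=0.5, l_nm=405)
    print(field.shape)  # (100, 100)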
# Code for generation H in frequency domain: H <--> h
# Gbp(n,m) = exp(1i*k*Dz*sqrt(1-lambda^2*fx(n,m)^2-lambda^2*fy(n,m)^2))
class GenG():
def upsampling(self, Pow2factor, dx1):
"""
Utility: refine the sampling pitch dx1 by a factor of 2**Pow2factor.
"""
dx2 = dx1 / (2**Pow2factor)
return dx2
def __init__(self, NxNy=(144, 144), Dz_mm=0.5, delta_um = 2.2, UpsampleFactor=2, lambda_nm=405):
"""
oversample=2^UpsampleFactor
"""
delta_m = delta_um * 1e-6
delta2_m = self.upsampling(UpsampleFactor, delta_m)
Nx, Ny = NxNy
dfx = 1/(Nx*delta2_m)
dfy = 1/(Ny*delta2_m)
x = np.arange(-Ny/2, Ny/2)*dfy
y = np.arange(-Nx/2, Nx/2)*dfx
self.xv, self.yv = np.meshgrid(x, y)
self.lambda_m = lambda_nm * 1e-9
self.k_rad = 2*np.pi/self.lambda_m
self.Dz_m = Dz_mm * 1e-3
def bp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2)))
def fp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(-1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2))) | mit |
davidzchen/tensorflow | tensorflow/python/keras/engine/data_adapter.py | 1 | 53018 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import functools
import itertools
import math
import random
import numpy as np
import six
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
try:
from scipy import sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pd = None
@six.add_metaclass(abc.ABCMeta)
class DataAdapter(object):
"""Base class for input data adapter.
In TF 2.0, tf.data is the preferred API for users to feed in data. In order
to simplify the training code path, all input data objects will be
converted to `tf.data.Dataset` if possible.
Note that since this class mainly targets TF 2.0, it might make a lot
of assumptions under the hood, e.g. eager context by default, distribution
strategy, etc. In the meantime, some legacy feature support might be dropped,
e.g., the Iterator from the dataset API in v1, etc.
A sample usage of this class:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
Structure-wise, x and y can be a single object, a list of objects if there
are multiple inputs/outputs, or a dictionary of objects when the input/output
are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
The caller must make sure to call `can_handle()` first before invoking this
method. Providing an unsupported data type will result in unexpected behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when constructing
the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to respect
the strategy.
DataAdapter might choose to ignore any keyword argument if it doesn't
use it, or raise an exception if any required argument is not provided.
"""
if not self.can_handle(x, y):
raise ValueError("{} Cannot handle input {}, {}".format(
self.__class__, x, y))
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
Note that the dataset returned does not repeat across epochs, so the caller
might need to create a new iterator for the same dataset at the beginning of
each epoch. This behavior might change in the future.
Returns:
A tf.data.Dataset. The caller might use the dataset in different
contexts, e.g. iter(dataset) in eager mode to get the values directly, or in
graph mode, provide the iterator tensor to the Keras model function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
For certain types of data input, the number of batches is known, e.g. for
Numpy data, the size is the same as (number_of_elements / batch_size). Whereas
for a dataset or a python generator, the size is unknown since it may or may
not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown. The
caller could use this to control the loop of training, show progress bar,
or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
For certain types of data input, the batch size is known, and even
required, as with a numpy array. Whereas for a dataset, the batch size is
unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
@abc.abstractmethod
def should_recreate_iterator(self):
"""Returns whether a new iterator should be created every epoch."""
raise NotImplementedError
def get_samples(self):
"""Returns number of samples in the data, or `None`."""
if not self.get_size() or not self.batch_size():
return None
total_sample = self.get_size() * self.batch_size()
if self.has_partial_batch():
total_sample -= (self.batch_size() - self.partial_batch_size())
return total_sample
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs):
super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs)).pop()
_check_data_cardinality(inputs)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
indices_dataset = dataset_ops.DatasetV2.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices rather
# than reusing the same range Tensor. (presumably because of buffer
# forwarding.)
indices = math_ops.range(num_samples, dtype=dtypes.int64)
if shuffle and shuffle != "batch":
indices = random_ops.random_shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take quite
# a while so we don't want to wait for prefetching over an epoch boundary to
# trigger the next permutation. On the other hand, too many simultaneous
# shuffles can contend on a hardware level and degrade all performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is to
slice the Tensor in a Dataset map. (With a condition on the upper index to
handle the partial batch.) However it turns out that coercing the Tensor
into a shape which is divisible by the batch size (and handling the last
partial batch separately) allows for a much more favorable memory access
pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
first_k_indices = array_ops.reshape(
first_k_indices, [num_full_batches, batch_size])
flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(
indices, [num_in_full_batch], [self._partial_batch_size]))
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return nest.map_structure(random_ops.random_shuffle, batch)
dataset = dataset.map(shuffle_batch)
self._dataset = dataset
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = dataset_ops.DatasetV2.zip((
indices_dataset,
dataset_ops.DatasetV2.from_tensors(inputs).repeat()
))
def grab_batch(i, data):
return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
dataset = dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of (unnecessary)
# input pipeline graph serialization and deserialization
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
distribute_options.ExternalStatePolicy.IGNORE)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self):
# An infinite dataset is always created here.
return False
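# --- Usage sketch (internal API, normally driven by Model.fit; signatures may
# change between TF versions) ---
# TensorLikeDataAdapter wraps NumPy arrays into a batched, optionally shuffled
# tf.data.Dataset.
def _demo_tensor_like_adapter():
    x = np.random.random((64, 3)).astype("float32")
    y = np.random.randint(0, 2, size=(64,))
    adapter = TensorLikeDataAdapter(x, y, batch_size=16, epochs=1, shuffle=True)
    print(adapter.get_size(), adapter.batch_size())  # 4 16
    for batch in adapter.get_dataset().take(1):
        print(nest.map_structure(lambda t: t.shape, batch))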
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
This adapter handles array-like datasets that may be too big to fully
fit into memory.
Specifically, this adapter handles any Python class which implements:
`__getitem__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__") and
hasattr(v, "shape") and
hasattr(v, "dtype") and
hasattr(v, "__len__")
)
if (not TensorLikeDataAdapter.can_handle(x, y) and
not CompositeTensorDataAdapter.can_handle(x, y)):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warn(
"Keras is training/fitting/evaluating on array-like data. Keras may "
"not be optimized for this format, so if your input data format is "
"supported by TensorFlow I/O (https://github.com/tensorflow/io) we "
"recommend using that to load a Dataset instead.")
super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
# to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(data, ind.numpy(),
contiguous=contiguous)
return [slice_array(inp) for inp in flat_inputs]
flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset inherits from CompositeTensor but shouldn't be handled here.
if (tf_utils.is_extension_type(v) and
not isinstance(v, dataset_ops.DatasetV2)):
return True
# Support Scipy sparse tensors if scipy is installed
if scipy_sparse is not None and scipy_sparse.issparse(v):
return True
return False
def _is_tensor_or_composite(v):
if isinstance(v, (ops.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, (float, int, str, bytes, bytearray)):
return True
if isinstance(inp, (list, tuple)):
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
**kwargs):
super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
def should_recreate_iterator(self):
return True
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) or
_is_distributed_dataset(x))
def __init__(self,
x,
y=None,
sample_weights=None,
steps=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
# Note that the dataset instance is immutable, so it's fine to reuse the
# user-provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(y, sample_weights, steps)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (self._user_steps is None or
cardinality.cardinality(self._dataset).numpy() == self._user_steps)
def _validate_args(self, y, sample_weights, steps):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if steps is None:
if _is_distributed_dataset(self._dataset):
raise ValueError("When providing a distributed dataset, you must "
"specify the number of steps to run.")
size = cardinality.cardinality(self._dataset).numpy()
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When providing an infinite dataset, you must specify "
"the number of steps to run (if you did not intend to "
"create an infinite dataset, make sure to not call "
"`repeat()` on the dataset).")
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
# Generators should never shuffle as exhausting the generator in order to
# shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,))
self._first_batch_size = int(nest.flatten(peek)[0].shape[0])
def _get_dynamic_shape(t):
shape = t.shape
# Unknown number of dimensions, `as_list` cannot be called.
if shape.rank is None:
return shape
return tensor_shape.TensorShape([None for _ in shape.as_list()])
output_shapes = nest.map_structure(_get_dynamic_shape, peek)
output_types = nest.map_structure(lambda t: t.dtype, peek)
# Note that dataset API takes a callable that creates a generator object,
# rather than generator itself, which is why we define a function here.
generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing,
max_queue_size)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = dataset_ops.DatasetV2.from_generator(
wrapped_generator, output_types, output_shapes=output_shapes)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = nest.list_to_tuple(data)
def _convert_dtype(t):
if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)):
return np.array(t, dtype=backend.floatx())
return t
data = nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
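# --- Usage sketch (internal API; eager mode assumed) ---
# GeneratorDataAdapter peeks one batch to infer dtypes/shapes, then rebuilds a
# tf.data.Dataset around the generator.
def _demo_generator_adapter():
    def gen():
        while True:
            yield np.zeros((8, 2), dtype="float32"), np.ones((8,), dtype="float32")
    adapter = GeneratorDataAdapter(gen())
    print(adapter.representative_batch_size())  # 8, taken from the peeked batch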
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(self,
x,
y=None,
sample_weights=None,
shuffle=False,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"`keras.utils.Sequence` as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input.")
self._size = len(x)
self._shuffle_sequence = shuffle
self._keras_sequence = x
self._enqueuer = None
super(KerasSequenceAdapter, self).__init__(
x,
shuffle=False, # Shuffle is handled in the _handle_multiprocessing override.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
model=model,
**kwargs)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
self._enqueuer = data_utils.OrderedEnqueuer(
x, use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence)
self._enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return self._enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return self._size
def should_recreate_iterator(self):
return True
def on_epoch_end(self):
if self._enqueuer:
self._enqueuer.stop()
self._keras_sequence.on_epoch_end()
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter, TensorLikeDataAdapter,
GenericArrayLikeDataAdapter, DatasetAdapter,
GeneratorDataAdapter, KerasSequenceAdapter, CompositeTensorDataAdapter,
]
def select_data_adapter(x, y):
"""Selects a data adapter than can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle "
"input: {}, {}".format(
_type_name(x), _type_name(y)))
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(
adapter_cls, _type_name(x), _type_name(y)))
return adapter_cls[0]
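# --- Usage sketch (internal helper) ---
# select_data_adapter resolves exactly one adapter class for a given (x, y):
# NumPy inputs map to TensorLikeDataAdapter, tf.data inputs to DatasetAdapter.
def _demo_select_data_adapter():
    x_np = np.zeros((8, 2))
    y_np = np.zeros((8,))
    print(select_data_adapter(x_np, y_np))  # TensorLikeDataAdapter
    ds = dataset_ops.DatasetV2.from_tensor_slices((x_np, y_np)).batch(4)
    print(select_data_adapter(ds, None))  # DatasetAdapter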
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x))
def _process_tensorlike(inputs):
"""Process tensor-like inputs.
This function:
(1) Converts `Numpy` arrays to `Tensor`s.
(2) Converts `Scipy` sparse matrices to `SparseTensor`s.
(3) Converts `list`s to `tuple`s (for `tf.data` support).
Args:
inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.
Returns:
Structure of `Tensor`s or tensor-like.
"""
def _convert_numpy_and_scipy(x):
if isinstance(x, np.ndarray):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
elif scipy_sparse and scipy_sparse.issparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
return nest.list_to_tuple(inputs)
def is_none_or_empty(inputs):
# util method to check if the input is None or an empty list.
# the python "not" check will raise an error like below if the input is a
# numpy array
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not nest.flatten(inputs)
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weight_modes structure with output structure."""
if target_structure is None or not nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes))
except (ValueError, TypeError):
target_str = str(nest.map_structure(lambda _: "...", target_structure))
mode_str = str(nest.map_structure(lambda _: "...", sample_weight_modes))
# Attempt to coerce sample_weight_modes to the target structure. This
# implicitly depends on the fact that Model flattens outputs for its
# internal representation.
try:
sample_weight_modes = nest.pack_sequence_as(
target_structure, nest.flatten(sample_weight_modes))
logging.warning(
"sample_weight modes were coerced from\n {}\n to \n {}"
.format(target_str, mode_str))
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(target_str, mode_str))
return sample_weight_modes
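# --- Usage sketch ---
# A single string mode is broadcast to match the target structure: a dict of
# outputs yields a dict of modes; a list of outputs yields a list of modes.
def _demo_broadcast_sample_weight_modes():
    print(broadcast_sample_weight_modes({"a": 1, "b": 2}, "temporal"))
    # {'a': 'temporal', 'b': 'temporal'}
    print(broadcast_sample_weight_modes([1, 2, 3], "temporal"))
    # ['temporal', 'temporal', 'temporal']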
class DataHandler(object):
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None):
self._initial_epoch = initial_epoch
self._epochs = epochs
self._insufficient_data = False
self._model = model
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = 1
self._steps_per_execution_value = 1
else:
self._steps_per_execution = steps_per_execution
self._steps_per_execution_value = steps_per_execution.numpy().item()
adapter_cls = select_data_adapter(x, y)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=ds_context.get_strategy(),
model=model)
strategy = ds_context.get_strategy()
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
if not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._current_step = 0
self._step_increment = self._steps_per_execution_value - 1
self._insufficient_data = False
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None and
self._steps_per_execution_value > self._inferred_steps)
original_value = self._steps_per_execution_value
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
self._steps_per_execution_value = self._inferred_steps
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
self._steps_per_execution_value = original_value
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
context.async_wait()
except (StopIteration, errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate at "
"least `steps_per_epoch * epochs` batches (in this case, "
"{} batches). You may need to use the repeat() function "
"when building your dataset.".format(total_epochs *
self._inferred_steps))
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (self._inferred_steps is None or
self._current_step < self._inferred_steps):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
can_run_full_execution = (
self._steps_per_execution_value == 1 or
self._inferred_steps is None or
self._inferred_steps - self._current_step >=
self._steps_per_execution_value)
if can_run_full_execution:
self._step_increment = self._steps_per_execution_value - 1
yield self._current_step
self._current_step += self._steps_per_execution_value
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(self._steps_per_execution_value)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
size = cardinality.cardinality(dataset)
if size == cardinality.INFINITE and steps is None:
raise ValueError("When passing an infinitely repeating dataset, you "
"must specify how many steps to draw.")
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if self._steps_per_execution_value > 1 and self._inferred_steps is None:
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of steps "
"to run.")
def _make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Arguments:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
class_ids = sorted(class_weight.keys())
expected_class_ids = list(range(len(class_ids)))
if class_ids != expected_class_ids:
error_msg = (
"Expected `class_weight` to be a dict with keys from 0 to one less "
"than the number of classes, found {}").format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = ops.convert_to_tensor_v2_with_dispatch(
[class_weight[int(c)] for c in class_ids])
def _class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = unpack_x_y_sample_weight(data)
if nest.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single output.")
if y.shape.rank > 2:
raise ValueError("`class_weight` not supported for "
"3+ dimensional targets.")
y_classes = smart_cond.smart_cond(
y.shape.rank == 2 and backend.shape(y)[1] > 1,
lambda: backend.argmax(y, axis=1),
lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))
cw = array_ops.gather_v2(class_weight_tensor, y_classes)
if sw is not None:
cw = math_ops.cast(cw, sw.dtype)
sw, cw = expand_1d((sw, cw))
# `class_weight` and `sample_weight` are multiplicative.
sw = sw * cw
else:
sw = cw
return x, y, sw
return _class_weights_map_fn
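# Minimal usage sketch (added for illustration, not part of the original
# module). It shows the map fn produced above converting `class_weight`
# into per-sample weights; assumes eager TF 2.x, with a demo-only local
# TensorFlow import.
def _demo_make_class_weight_map_fn():  # pragma: no cover
  import tensorflow as tf
  x = tf.ones((4, 2))
  y = tf.constant([[0], [1], [1], [0]])
  ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)
  ds = ds.map(_make_class_weight_map_fn({0: 1.0, 1: 2.0}))
  for _, _, sw in ds:
    print(sw.numpy())  # per-sample weights gathered from `class_weight`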
def expand_1d(data):
"""Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""
def _expand_single_1d_tensor(t):
# Leaves `CompositeTensor`s as-is.
if (isinstance(t, ops.Tensor) and
isinstance(t.shape, tensor_shape.TensorShape) and t.shape.rank == 1):
return array_ops.expand_dims_v2(t, axis=-1)
return t
return nest.map_structure(_expand_single_1d_tensor, data)
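# Minimal sketch (added for illustration): `expand_1d` turns shape-(n,)
# tensors into shape-(n, 1) so that losses and sample weights broadcast
# consistently. Assumes eager TF 2.x; the local import is demo-only.
def _demo_expand_1d():  # pragma: no cover
  import tensorflow as tf
  t = tf.constant([1., 2., 3.])            # shape (3,)
  print(expand_1d(t).shape)                # (3, 1)
  print(expand_1d({"sw": t})["sw"].shape)  # nested structures are mapped too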
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of the data will become validation data.
Arguments:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
return isinstance(t, tensor_types) or t is None
flat_arrays = nest.flatten(arrays)
unsplittable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplittable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found the following types in the input: {}".format(unsplittable))
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1. - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not sufficient "
"to split it into a validation and training set as specified by "
"`validation_split={validation_split}`. Either provide more data, or a "
"different value for the `validation_split` argument." .format(
batch_dim=batch_dim, validation_split=validation_split))
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays)
val_arrays = nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays)
return train_arrays, val_arrays
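# Minimal usage sketch (added for illustration): with ten samples and
# `validation_split=0.2`, the *last* two samples become validation data,
# in deterministic order.
def _demo_train_validation_split():  # pragma: no cover
  x = np.arange(10).reshape(10, 1)
  y = np.arange(10)
  (x_tr, y_tr), (x_val, y_val) = train_validation_split((x, y), 0.2)
  print(x_tr.shape, x_val.shape)  # (8, 1) (2, 1)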
@keras_export("keras.utils.unpack_x_y_sample_weight", v1=[])
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = tf.ones((10, 5))
>>> labels_batch = tf.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Example in overridden `Model.train_step`:
```python
class MyModel(tf.keras.Model):
def train_step(self, data):
# If `sample_weight` is not provided, all samples will be weighted
# equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
```
Arguments:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not
provided.
"""
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
else:
error_msg = ("Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
"or `(x, y, sample_weight)`, found: {}").format(data)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight", v1=[])
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Arguments:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unnecessary tuple.
if not nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
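# Round-trip sketch (added for illustration): packing then unpacking
# restores `(x, y, sample_weight)`, with `None` for omitted entries.
# Assumes eager TF 2.x; demo-only local import.
def _demo_pack_unpack_roundtrip():  # pragma: no cover
  import tensorflow as tf
  x = tf.ones((10, 1))
  y = tf.zeros((10,))
  data = pack_x_y_sample_weight(x, y)          # -> (x, y)
  x2, y2, sw = unpack_x_y_sample_weight(data)  # -> (x, y, None)
  assert sw is None and x2 is x and y2 is y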
def single_batch_iterator(strategy,
x,
y=None,
sample_weight=None,
class_weight=None):
"""Creates a single-batch dataset."""
x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
if y is None:
data = (x,)
elif sample_weight is None:
data = (x, y)
else:
data = (x, y, sample_weight)
_check_data_cardinality(data)
dataset = dataset_ops.DatasetV2.from_tensors(data)
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
dataset = strategy.experimental_distribute_dataset(dataset)
return iter(dataset)
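# Minimal sketch (added for illustration): with the default (no-op)
# distribution strategy, the single-batch iterator yields the whole
# input as one `(x, y)` batch. Demo-only local import; assumes TF 2.x.
def _demo_single_batch_iterator():  # pragma: no cover
  import tensorflow as tf
  it = single_batch_iterator(tf.distribute.get_strategy(),
                             np.ones((4, 2)), np.zeros((4,)))
  x, y = next(it)
  print(x.shape, y.shape)  # (4, 2) (4,)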
def _check_data_cardinality(data):
num_samples = set(int(i.shape[0]) for i in nest.flatten(data))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, single_data in zip(["x", "y", "sample_weight"], data):
msg += " {} sizes: {}\n".format(
label, ", ".join(str(i.shape[0]) for i in nest.flatten(single_data)))
msg += "Make sure all arrays contain the same number of samples."
raise ValueError(msg)
def _scipy_sparse_to_sparse_tensor(t):
"""Converts a SciPy sparse matrix to a SparseTensor."""
sparse_coo = t.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
if issubclass(data.dtype.type, np.floating):
data = data.astype(backend.floatx())
indices = np.concatenate(
(np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1)
return sparse_tensor.SparseTensor(indices, data, shape)
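# Minimal sketch (added for illustration): a 2x2 SciPy COO matrix becomes a
# SparseTensor with stacked (row, col) indices; floating-point values are
# cast to `backend.floatx()`. Demo-only import; assumes SciPy is available.
def _demo_scipy_sparse_to_sparse_tensor():  # pragma: no cover
  from scipy.sparse import coo_matrix
  mat = coo_matrix(([1., 2.], ([0, 1], [1, 0])), shape=(2, 2))
  st = _scipy_sparse_to_sparse_tensor(mat)
  print(st.indices.numpy())  # [[0 1] [1 0]]
  print(st.values.numpy())   # [1. 2.]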
def _is_distributed_dataset(ds):
# TODO(b/151165986): Use public APIs.
return isinstance(
ds,
(input_lib.DistributedDataset, input_lib.DistributedDatasetsFromFunction))
| apache-2.0 |
dsm054/pandas | pandas/tests/plotting/test_frame.py | 3 | 123748 | # coding: utf-8
""" Test cases for DataFrame.plot """
import pytest
import string
import warnings
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.core.dtypes.api import is_list_like
from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from numpy.random import rand, randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_xticklabels(), visible=exp)
@pytest.mark.slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, grid=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# mpl >= 1.5.2 (or slightly below) throw AttributError
with pytest.raises((TypeError, AttributeError)):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg, and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
# present). See: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
# axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and the required number of axes is 1,
# the passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
result = ax.axes
assert result is axes[0]
# GH 15516
def test_mpl2_color_cycle_str(self):
colors = ['C' + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
for c in colors:
_check_plot_works(df.plot, color=c)
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=['red'])
def test_rgb_tuple_color(self):
# GH 16695
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0))
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color='')
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=['red', 'black'], style=['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ['-', '--']
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ['red', 'black']
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=['red', 'black'], style=['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
assert df.index.name == 'NAME'
@pytest.mark.slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1), df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'), df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1),
figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@pytest.mark.slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@pytest.mark.slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plotting.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)}, index=np.arange(99, -1, -1),
dtype=np.int64)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
def test_unsorted_index_lims(self):
df = DataFrame({'y': [0., 1., 2., 3.]}, index=[1., 0., 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., np.nan, 3., 4., 5., 6.]},
index=[1., 0., 3., 2., np.nan, 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., 2., 3.], 'z': [91., 90., 93., 92.]})
ax = df.plot(x='z', y='y')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
@pytest.mark.slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3, )
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax,
labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
def test_groupby_boxplot_sharey(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched; check whether the right
# pair of axes is turned on or off
df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14],
'b': [0.56, 0.84, 0.29, 0.56, 0.85],
'c': [0, 1, 2, 3, 1]},
index=[0, 1, 2, 3, 4])
# behavior without keyword
axes = df.groupby('c').boxplot()
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# setting sharey=True should be identical
axes = df.groupby('c').boxplot(sharey=True)
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# sharey=False, all yticklabels should be visible
axes = df.groupby('c').boxplot(sharey=False)
expected = [True, True, True, True]
self._assert_ytickslabels_visibility(axes, expected)
def test_groupby_boxplot_sharex(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched; check whether the right
# pair of axes is turned on or off
df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14],
'b': [0.56, 0.84, 0.29, 0.56, 0.85],
'c': [0, 1, 2, 3, 1]},
index=[0, 1, 2, 3, 4])
# behavior without keyword
axes = df.groupby('c').boxplot()
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# setting sharex=False should be identical
axes = df.groupby('c').boxplot(sharex=False)
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# sharex=True, xticklabels should be visible
# only for bottom plots
axes = df.groupby('c').boxplot(sharex=True)
expected = [False, False, True, True]
self._assert_xtickslabels_visibility(axes, expected)
@pytest.mark.slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45,
fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {"numeric": np.array([1, 2, 5]),
"timedelta": [pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h")],
"datetime_no_tz": [pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")],
"datetime_all_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00",
utc=True),
pd.to_datetime("2017-08-02 00:00:00",
utc=True)],
"text": ["This", "should", "fail"]}
testdata = DataFrame(data)
ax_numeric = testdata.plot(y="numeric")
assert (ax_numeric.get_lines()[0].get_data()[1] ==
testdata["numeric"].values).all()
ax_timedelta = testdata.plot(y="timedelta")
assert (ax_timedelta.get_lines()[0].get_data()[1] ==
testdata["timedelta"].values).all()
ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
assert (ax_datetime_no_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_no_tz"].values).all()
ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_all_tz"].values).all()
with pytest.raises(TypeError):
testdata.plot(y="text")
@pytest.mark.xfail(reason='no support for period, categorical, '
'datetime_mixed_tz',
strict=True)
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
period:
since period isn't yet implemented in ``select_dtypes``
and because it will need a custom value converter +
tick formatter (as was done for x-axis plots)
categorical:
because it will need a custom value converter +
tick formatter (also doesn't work for x-axis, as of now)
datetime_mixed_tz:
because of the way pandas handles ``Series`` of
``datetime`` objects with different timezones;
generally, converting ``datetime`` objects to a tz-aware
form could help with this problem
"""
data = {"numeric": np.array([1, 2, 5]),
"period": [pd.Period('2017-08-01 00:00:00', freq='H'),
pd.Period('2017-08-01 02:00', freq='H'),
pd.Period('2017-08-02 00:00:00', freq='H')],
"categorical": pd.Categorical(["c", "b", "a"],
categories=["a", "b", "c"],
ordered=False),
"datetime_mixed_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")]}
testdata = pd.DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (ax_period.get_lines()[0].get_data()[1] ==
testdata["period"].values).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (ax_categorical.get_lines()[0].get_data()[1] ==
testdata["categorical"].values).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric",
y="datetime_mixed_tz")
assert (ax_datetime_mixed_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_mixed_tz"].values).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
@pytest.mark.slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
# invalid layout should not affect the input and return value
# (the warning itself is tested in
# TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@pytest.mark.slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
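# Note (added): stacking accumulates y-values column by column, so the
# i-th stacked line must equal the running sum of the first i+1 unstacked
# series; the helper above asserts exactly that.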
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4), columns=['w', 'x', 'y', 'z'])
neg_df = -df
# each column has either all-positive or all-negative values
sep_df = DataFrame({'w': rand(6),
'x': rand(6),
'y': -rand(6),
'z': -rand(6)})
# each column has mixed positive and negative values
mixed_df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with pytest.raises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1,
'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove nan for comparison purposes
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
exp = np.array([3, 2, 1], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
tm.assert_numpy_array_equal(
masked1.mask, np.array([False, False, True, False]))
tm.assert_numpy_array_equal(
masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=['x', 'y', 'z', 'four'])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
def test_bar_user_colors(self):
df = pd.DataFrame({"A": range(4),
"B": range(1, 5),
"color": ['red', 'blue', 'blue', 'red']})
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y='A', color=df['color'])
result = [p.get_facecolor() for p in ax.patches]
expected = [(1., 0., 0., 1.),
(0., 0., 1., 1.),
(0., 0., 1., 1.),
(1., 0., 0., 1.)]
assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
position=0.2)
@pytest.mark.slow
def test_bar_barwidth_position_int(self):
# GH 12979
df = DataFrame(randn(5, 5))
for w in [1, 1.]:
ax = df.plot.bar(stacked=True, width=w)
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
assert ax.get_xlim() == (-0.75, 4.75)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.5
assert ax.patches[-1].get_x() == 3.5
self._check_bar_alignment(df, kind='bar', stacked=True, width=1)
self._check_bar_alignment(df, kind='barh', stacked=False, width=1)
self._check_bar_alignment(df, kind='barh', stacked=True, width=1)
self._check_bar_alignment(df, kind='bar', subplots=True, width=1)
self._check_bar_alignment(df, kind='barh', subplots=True, width=1)
@pytest.mark.slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
assert result == [-1, -2, -3, -4, -5]
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
assert result == [1, 2, 3, 4, 5]
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
assert result == [-1] * 5
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
@pytest.mark.slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20],
'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
@pytest.mark.slow
def test_bar_categorical(self):
# GH 13019
df1 = pd.DataFrame(np.random.randn(6, 5),
index=pd.Index(list('ABCDEF')),
columns=pd.Index(list('abcde')))
# categorical index must behave the same
df2 = pd.DataFrame(np.random.randn(6, 5),
index=pd.CategoricalIndex(list('ABCDEF')),
columns=pd.CategoricalIndex(list('abcde')))
for df in [df1, df2]:
ax = df.plot.bar()
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 5.15
ax = df.plot.bar(stacked=True)
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
@pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with pytest.raises(TypeError):
df.plot.scatter(x='x')
with pytest.raises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
# addressing issue #10611, to ensure the colorbar does not
# interfere with the x-axis label and ticklabels with the
# ipython inline backend.
random_array = np.random.random((1000, 3))
df = pd.DataFrame(random_array,
columns=['A label', 'B label', 'C label'])
ax1 = df.plot.scatter(x='A label', y='B label')
ax2 = df.plot.scatter(x='A label', y='B label', c='C label')
vis1 = [vis.get_visible() for vis in
ax1.xaxis.get_minorticklabels()]
vis2 = [vis.get_visible() for vis in
ax2.xaxis.get_minorticklabels()]
assert vis1 == vis2
vis1 = [vis.get_visible() for vis in
ax1.xaxis.get_majorticklabels()]
vis2 = [vis.get_visible() for vis in
ax2.xaxis.get_majorticklabels()]
assert vis1 == vis2
assert (ax1.xaxis.get_label().get_visible() ==
ax2.xaxis.get_label().get_visible())
@pytest.mark.slow
def test_if_hexbin_xaxis_label_is_visible(self):
# addressing issue #10678, to ensure the colorbar does not
# interfere with the x-axis label and ticklabels with the
# ipython inline backend.
random_array = np.random.random((1000, 3))
df = pd.DataFrame(random_array,
columns=['A label', 'B label', 'C label'])
ax = df.plot.hexbin('A label', 'B label', gridsize=12)
assert all(vis.get_visible() for vis in
ax.xaxis.get_minorticklabels())
assert all(vis.get_visible() for vis in
ax.xaxis.get_majorticklabels())
assert ax.xaxis.get_label().get_visible()
@pytest.mark.slow
def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
import matplotlib.pyplot as plt
random_array = np.random.random((1000, 3))
df = pd.DataFrame(random_array,
columns=['A label', 'B label', 'C label'])
fig, axes = plt.subplots(1, 2)
df.plot.scatter('A label', 'B label', c='C label', ax=axes[0])
df.plot.scatter('A label', 'B label', c='C label', ax=axes[1])
plt.tight_layout()
points = np.array([ax.get_position().get_points()
for ax in fig.axes])
axes_x_coords = points[:, :, 0]
parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
assert np.isclose(parent_distance,
colorbar_distance, atol=1e-7).all()
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self):
# GH 16199
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Categorical(['a', 'b', 'a', 'c'])})
with pytest.raises(ValueError) as ve:
df.plot(x='x', y='y', kind='scatter')
ve.match('requires y column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='x', kind='scatter')
ve.match('requires x column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='y', kind='scatter')
ve.match('requires x column to be numeric')
@pytest.mark.slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
assert ax.collections[0].cmap.name == 'Greys'
# n.b. there appears to be no public method
# to get the colorbar label
assert ax.collections[0].colorbar._label == 'z'
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
assert ax.collections[0].cmap.name == cm
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
assert ax.collections[0].colorbar is None
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
assert ax.collections[0].colorbar is None
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib;
# this functionality was accidentally removed previously.
# See https://github.com/pandas-dev/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
# expect the face colors of the points in the non-colormap path to be
# identical to the values we supplied, normally we'd be on shaky ground
# comparing floats for equality but here we expect them to be
# identical.
tm.assert_numpy_array_equal(ax.collections[0]
.get_facecolor(), rgba_array)
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with pytest.raises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
default_colors = self._unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64))
@pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot.bar, subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center', width=0.5,
position=0.5):
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position, grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min(p.get_x() for p in ax.patches)
max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min(p.get_y() for p in ax.patches)
max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
tm.assert_almost_equal(ax_min, min_edge - 0.25)
tm.assert_almost_equal(ax_max, max_edge + 0.25)
p = ax.patches[0]
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(
df.columns) * position
edge = p.get_y()
else:
raise ValueError
# Check that the ticks are located on integers
assert (axis.get_ticklocs() == np.arange(len(df))).all()
if align == 'center':
# Check whether the bar is located at the center
tm.assert_almost_equal(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
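# Note (added): with `align='center'` a grouped (non-stacked) bar plot
# centers the whole group of len(df.columns) bars on each integer tick,
# which is why the center above is computed as
# x + width * len(df.columns) * position.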
@pytest.mark.slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@pytest.mark.slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@pytest.mark.slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@pytest.mark.slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@pytest.mark.slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
align='edge')
@pytest.mark.slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([.1, 1., 10., 100])
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_log_subplots(self):
expected = np.array([.1, 1., 10., 100., 1000., 1e4])
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# different warning on py3
if not PY3:
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box, subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box,
subplots=True, vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box() # default axes
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@pytest.mark.slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(result, None, expected_keys=[
'height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot, kind='kde',
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@pytest.mark.slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.hist,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
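# (added note) newer matplotlib uses ``density`` in place of the
# deprecated ``normed`` histogram kwarg; the branch below picks the
# spelling appropriate for the installed version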
if plotting._compat._mpl_ge_2_2_0():
kwargs = {"density": True}
else:
kwargs = {"normed": True}
ax = series.plot.hist(cumulative=True, bins=4, **kwargs)
# height of the last bin must be 1.0 (cumulative density)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
# dtype depends on the values above, no need to check
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y,
check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h,
check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x,
check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w,
check_dtype=False)
@pytest.mark.slow
def test_hist_df_coord(self):
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True,
orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(
ax.patches[10:],
expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True,
orientation='horizontal')
self._check_box_coord(axes[0].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@pytest.mark.slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@pytest.mark.slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(
df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False, label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
# ensure label args pass through and that
# the index name and column names do not mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
assert df5.columns.tolist() == ['b', 'c']
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@pytest.mark.slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@pytest.mark.slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^',
1: '+',
2: 'o'}, {0: '^',
1: '+'}, ['^', '+', 'o'], ['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
assert l.get_marker() == markers[i]
@pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == 'None'
@pytest.mark.slow
@tm.capture_stdout
def test_line_colors(self):
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax2 = df.plot(color=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
assert l1.get_color() == l2.get_color()
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.loc[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with pytest.raises(ValueError):
# Colors given as shorthand hex values raise a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Force the plot to be drawn
_check_plot_works(df.plot, color=custom_colors)
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ['r', 'g', 'b']
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
@pytest.mark.slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
c = [c]
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with pytest.raises(ValueError):
# Colors given as shorthand hex values raise a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Force the plot to be drawn
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, color=custom_colors, subplots=True)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
linecolors = jet_with_alpha
self._check_colors(handles[:len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@pytest.mark.slow
def test_hist_colors(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors(self):
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind='kde', color='DodgerBlue',
subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
fliers_c=None):
# TODO: move this helper outside the test?
if fliers_c is None:
fliers_c = 'k'
self._check_colors(bp['boxes'],
linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'],
linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'],
linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'],
linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'],
linecolors=[caps_c] * len(bp['caps']))
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0],
default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0),
(0, 1, 0), '#123456')
with pytest.raises(ValueError):
# A color dict containing an invalid key raises a ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
import cycler
colors = list('rgbk')
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
@pytest.mark.slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with pytest.raises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind='aasdf')
@pytest.mark.parametrize("x,y,lbl", [
(['B', 'C'], 'A', 'a'),
(['A'], ['B', 'C'], ['b', 'c']),
('A', ['B', 'C'], 'badlabel')
])
def test_invalid_xy_args(self, x, y, lbl):
# GH 18671, 19699 allows y to be list-like but not x
df = DataFrame({"A": [1, 2], 'B': [3, 4], 'C': [5, 6]})
with pytest.raises(ValueError):
df.plot(x=x, y=y, label=lbl)
@pytest.mark.parametrize("x,y", [
('A', 'B'),
(['A'], 'B')
])
def test_invalid_xy_args_dup_cols(self, x, y):
# GH 18671, 19699 allows y to be list-like but not x
df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list('AAB'))
with pytest.raises(ValueError):
df.plot(x=x, y=y)
@pytest.mark.parametrize("x,y,lbl,colors", [
('A', ['B'], ['b'], ['red']),
('A', ['B', 'C'], ['b', 'c'], ['red', 'blue']),
(0, [1, 2], ['bokeh', 'cython'], ['green', 'yellow'])
])
def test_y_listlike(self, x, y, lbl, colors):
# GH 19699: tests list-like y and verifies lbls & colors
df = DataFrame({"A": [1, 2], 'B': [3, 4], 'C': [5, 6]})
_check_plot_works(df.plot, x='A', y=y, label=lbl)
ax = df.plot(x=x, y=y, label=lbl, color=colors)
assert len(ax.lines) == len(y)
self._check_colors(ax.get_lines(), linecolors=colors)
@pytest.mark.parametrize("x,y,colnames", [
(0, 1, ['A', 'B']),
(1, 0, [0, 1])
])
def test_xy_args_integer(self, x, y, colnames):
# GH 20056: tests integer args for xy and checks col names
df = DataFrame({"A": [1, 2], 'B': [3, 4]})
df.columns = colnames
_check_plot_works(df.plot, x=x, y=y)
@pytest.mark.slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
# TODO: need better way to test. This just does existence.
assert len(ax.collections) == 1
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
# hexbin should have 2 axes in the figure: one for plotting and
# another for the colorbar
assert len(axes[0].figure.axes) == 2
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
assert len(ax.collections) == 1
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
assert ax.collections[0].cmap.name == 'BuGn'
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
assert ax.collections[0].cmap.name == 'YlGn'
with pytest.raises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn', colormap='BuGn')
@pytest.mark.slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with pytest.raises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
assert ax.get_ylabel() == ylabel
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True, labels=labels,
colors=color_args)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
assert result == expected
# legend labels
# NaN's not included in legend with subplots
# see https://github.com/pandas-dev/pandas/issues/8390
assert ([x.get_text() for x in ax.get_legend().get_texts()] ==
base_expected[:i] + base_expected[i + 1:])
@pytest.mark.slow
def test_errorbar_plot(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'],
xerr=df_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(df.plot,
yerr=df_err, xerr=df_err,
subplots=True,
kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df + 1).plot, yerr=df_err,
xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot,
yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12) * 0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with pytest.raises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
with pytest.raises((ValueError, TypeError)):
df.plot(yerr=df_err)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'z': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_timeseries(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(tdf.plot,
kind=kind, yerr=tdf_err,
subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
err = np.random.rand(3, 2, 5)
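# (added note) asymmetrical errors are supplied with shape
# (n_columns, 2, n_points), i.e. one (lower, upper) pair of rows per
# column, which is why the transposed ``err.T`` further down is rejected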
# each column is [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]
df = DataFrame(np.arange(15).reshape(3, 5)).T
ax = df.plot(yerr=err, xerr=err / 2)
yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
expected_0_0 = err[0, :, 0] * np.array([-1, 1])
tm.assert_almost_equal(yerr_0_0, expected_0_0)
with pytest.raises(ValueError):
df.plot(yerr=err.T)
tm.close()
@td.xfail_if_mpl_2_2
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(
np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err,
yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
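# note (added): this helper reads ``ax`` from the enclosing test
# scope; its ``containers`` argument is effectively unused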
lines = []
errs = [c.lines
for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines)))
# GH 8081
df = DataFrame(
np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@pytest.mark.slow
def test_sharex_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axes in fig.get_axes() are sorted differently than pandas
# expects them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_sharey_and_ax(self):
# https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
# the axes in fig.get_axes() are sorted differently than pandas
# expects them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._core._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with pytest.raises(ReferenceError):
# need to actually access something to get an error
results[key].lines
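# (added note) a minimal standalone sketch of the same weakref-based
# leak check, with ``make_obj`` as a placeholder factory:
#
#     import gc, weakref
#     proxy = weakref.proxy(make_obj())   # no strong reference kept
#     gc.collect()
#     proxy.anything                      # raises ReferenceError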
@pytest.mark.slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# x-axis labels of the 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# x-axis labels of the 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
assert len(ax1.lines) == 1
ax2 = df.plot(ax=ax2)
assert len(ax2.lines) == 2
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
assert len(ax1.lines) == 1
assert len(ax2.lines) == 1
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
for ax in [ax1, ax2]:
# y-axis labels are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# x-axis labels of axes[0] (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
self._check_visible(axes[0].get_yticklabels(), visible=True)
# y-axis labels of axes[1] (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# x-axis labels are visible because there is only one row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
# all labels are visible because the axes are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
assert len(ax.lines) == 1
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}),
plotting._core._dataframe_kinds, kws={'x': 'a', 'y': 'b'})
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with pytest.raises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but figure contains also
# a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
# supplied ax itself is a plain Axes, but because of the cmap keyword
# a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
try: # mpl 1.5
with mpl.rc_context(
rc={'axes.prop_cycle': mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
except (AttributeError, KeyError): # mpl 1.4
with mpl.rc_context(rc={'axes.color_cycle': color_tuples}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
@pytest.mark.parametrize('method', ['line', 'barh', 'bar'])
def test_secondary_axis_font_size(self, method):
# GH: 12565
df = (pd.DataFrame(np.random.randn(15, 2),
columns=list('AB'))
.assign(C=lambda df: df.B.cumsum())
.assign(D=lambda df: df.C * 1.1))
fontsize = 20
sy = ['C', 'D']
kwargs = dict(secondary_y=sy, fontsize=fontsize,
mark_right=True)
ax = getattr(df.plot, method)(**kwargs)
self._check_ticks_props(axes=ax.right_ax,
ylabelsize=fontsize)
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
ax_ll = plt.subplot(gs[1, 0])
ax_tr = plt.subplot(gs[0, 1])
ax_lr = plt.subplot(gs[1, 1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
| bsd-3-clause |
untom/scikit-learn | sklearn/ensemble/weight_boosting.py | 30 | 40648 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base estimator of this
boosted ensemble is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check the base estimator
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
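# Example (added, illustrative): for an already fitted estimator
# ``clf`` and held-out ``X_test``/``y_test`` (placeholders), track the
# score after each boost, e.g. to choose n_estimators:
#
#     for n, score in enumerate(clf.staged_score(X_test, y_test), 1):
#         print(n, score)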
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
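# (added note) written out, with K = n_classes and
# p_k(x) = estimator.predict_proba(X)[:, k], the function above returns
#
#     h_k(x) = (K - 1) * (log p_k(x) - (1 / K) * sum_j log p_j(x))
#
# for every sample x and class k.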
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
On-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME.R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
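# Editor's note (not in the original source): the y_coding built above maps
# the true class to 1 and every other class to -1 / (K - 1). A doctest-style
# trace, kept as comments so the class body is unchanged:
#
# >>> classes = np.array([0, 1, 2]); y = np.array([1])
# >>> y_codes = np.array([-1. / 2, 1.])
# >>> y_codes.take(classes == y[:, np.newaxis])
# array([[-0.5,  1. , -0.5]])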
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
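# Editor's note (not in the original source): the SAMME weight above is
# alpha = learning_rate * (ln((1 - err) / err) + ln(K - 1)); for K = 2 the
# ln(K - 1) term vanishes and this reduces to classic discrete AdaBoost.
#
# >>> round(np.log((1. - 0.3) / 0.3) + np.log(3 - 1.), 4)
# 1.5404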
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
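# Editor's note (not in the original source): in the binary case above the
# two symmetric columns collapse into one signed score:
#
# >>> pred = np.array([[0.25, 0.75], [0.75, 0.25]])
# >>> pred[:, 0] *= -1
# >>> pred.sum(axis=1)
# array([ 0.5, -0.5])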
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
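# Editor's note (not in the original source): after the exponential
# transform the rows are renormalized, so each sample's probabilities
# sum to one:
#
# >>> proba = np.exp(np.array([[0.5, -0.5], [2.0, -2.0]]))
# >>> proba /= proba.sum(axis=1)[:, np.newaxis]
# >>> np.allclose(proba.sum(axis=1), 1.0)
# True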
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
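# A minimal, editor-added usage sketch (not part of the original module).
# It assumes only the public scikit-learn API; make_classification is used
# purely for illustration.
def _demo_adaboost_classifier():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import AdaBoostClassifier

    X, y = make_classification(n_samples=200, random_state=0)
    clf = AdaBoostClassifier(n_estimators=25, random_state=0)
    clf.fit(X, y)
    # staged_score yields one score per fitted estimator, so the
    # ensemble can be monitored as it grows
    staged = list(clf.staged_score(X, y))
    assert len(staged) == len(clf.estimators_)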
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
On-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted may return a scalar; ensure bootstrap_idx is an array
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
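# Editor's note (not in the original source): AdaBoost.R2 uses
# beta = err / (1 - err); a confident estimator (small err) receives a
# large weight ln(1 / beta) and strongly shrinks the weights of samples
# it predicted well:
#
# >>> beta = 0.2 / (1. - 0.2)
# >>> round(np.log(1. / beta), 4)
# 1.3863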
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
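# Editor's note (not in the original source): the weighted-median rule
# above, traced on one sample with three hypothetical predictions:
#
# >>> predictions = np.array([[3.0, 1.0, 2.0]])
# >>> weights = np.array([0.2, 0.5, 0.3])
# >>> sorted_idx = np.argsort(predictions, axis=1)
# >>> weight_cdf = weights[sorted_idx].cumsum(axis=1)
# >>> median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
# >>> median_idx = median_or_above.argmax(axis=1)
# >>> float(predictions[0, sorted_idx[0, median_idx[0]]])
# 1.0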
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
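# A minimal, editor-added usage sketch for the regressor (not part of the
# original module); it assumes only the public scikit-learn API.
def _demo_adaboost_regressor():
    from sklearn.datasets import make_regression
    from sklearn.ensemble import AdaBoostRegressor

    X, y = make_regression(n_samples=200, random_state=0)
    reg = AdaBoostRegressor(n_estimators=25, loss='square', random_state=0)
    reg.fit(X, y)
    # predictions are the weighted median over the fitted estimators
    assert reg.predict(X).shape == (200,)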
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/io/test_pytables.py | 1 | 192388 | from contextlib import contextmanager
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
import os
import tempfile
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import (
PY35, PY36, BytesIO, is_platform_little_endian, is_platform_windows,
lrange, range, text_type, u)
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex,
RangeIndex, Series, Timestamp, bdate_range, compat, concat, date_range,
isna, timedelta_range)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_series_equal, set_timezone)
from pandas.io import pytables as pytables # noqa:E402
from pandas.io.formats.printing import pprint_thing
from pandas.io.pytables import (
ClosedFileError, HDFStore, PossibleDataLossError, Term, read_hdf)
from pandas.io.pytables import TableIterator # noqa:E402
tables = pytest.importorskip('tables')
# TODO:
# remove when gh-24839 is fixed; this affects numpy 1.16
# and pytables 3.4.4
xfail_non_writeable = pytest.mark.xfail(
LooseVersion(np.__version__) >= LooseVersion('1.16'),
reason=('gh-25511, gh-24839. pytables needs a '
'release beyond 3.4.4 to support numpy 1.16.x'))
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
# helpers and context managers to ensure file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except OSError:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except IOError:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=fletcher32)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
and is deleted on exiting; if path is a list, then create and
return a list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
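# An editor-added sketch (not part of the original test module) showing how
# the cleanup helpers above are typically used; 'demo.h5' and the _demo_*
# name are hypothetical.
def _demo_cleanup_helpers():
    df = pd.DataFrame({'a': [1, 2, 3]})
    with ensure_clean_path('demo.h5') as path:
        df.to_hdf(path, 'df')
        tm.assert_frame_equal(pd.read_hdf(path, 'df'), df)
    # the temporary file is removed once the block exits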
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except (ValueError, KeyError):
pass
class Base(object):
@classmethod
def setup_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def teardown_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setup_method(self, method):
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def teardown_method(self, method):
pass
@pytest.mark.single
class TestHDFStore(Base):
def test_format_kwarg_in_constructor(self):
# GH 13291
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, HDFStore, path, format='table')
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series', o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series', o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path, 'table', append=True)
result = read_hdf(path, 'table', where=['index>2'])
assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True)
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True)
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', append=False, format='fixed')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False, format='f')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False)
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=True, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# append to False
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# formats
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select('df'), df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='f')
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='fixed')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=True, format='foo')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=False, format='bar')
# File path doesn't exist
path = ""
pytest.raises(compat.FileNotFoundError,
read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
_maybe_remove(store, 'df')
store.put('df', df)
assert not store.get_storer('df').is_table
pytest.raises(ValueError, store.append, 'df2', df)
pd.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
assert store.get_storer('df').is_table
_maybe_remove(store, 'df2')
store.append('df2', df)
assert store.get_storer('df2').is_table
pd.set_option('io.hdf.default_format', None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
df.to_hdf(path, 'df')
with HDFStore(path) as store:
assert not store.get_storer('df').is_table
pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True)
pd.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
assert store.get_storer('df3').is_table
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
assert store.get_storer('df4').is_table
pd.set_option('io.hdf.default_format', None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
assert len(store) == 3
expected = {'/a', '/b', '/c'}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
assert list(store) == []
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store.info()
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store['df'] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df)
s = store.get_storer('df')
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
assert 'a' in store
assert 'b' in store
assert 'c' not in store
assert 'foo/bar' in store
assert '/foo/bar' in store
assert '/foo/b' not in store
assert 'bar' not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store['node())'] = tm.makeDataFrame()
assert 'node())' in store
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
assert store.root.a._v_attrs.pandas_version == '0.15.2'
assert store.root.b._v_attrs.pandas_version == '0.15.2'
assert store.root.df1._v_attrs.pandas_version == '0.15.2'
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but there
# is no version info
store.get_node('df2')._v_attrs.pandas_version = None
pytest.raises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r', 'r+']:
pytest.raises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r', 'r+']:
def f():
with HDFStore(path, mode=mode) as store: # noqa
pass
pytest.raises(IOError, f)
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r', 'r+']:
pytest.raises(IOError, df.to_hdf,
path, 'df', mode=mode)
df.to_hdf(path, 'df', mode='w')
else:
df.to_hdf(path, 'df', mode=mode)
# conv read
if mode in ['w']:
pytest.raises(ValueError, read_hdf,
path, 'df', mode=mode)
else:
result = read_hdf(path, 'df', mode=mode)
assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
result = read_hdf(path, 'df')
assert_frame_equal(result, df)
check('r')
check('r+')
check('a')
check('w')
check_default_mode()
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
pytest.raises(PossibleDataLossError, store.open, 'w')
store.close()
assert not store.is_open
# truncation ok here
store.open('w')
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
assert store.is_open
assert len(store) == 1
assert store._mode == 'r'
store.close()
assert not store.is_open
# reopen as append
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
# reopen as append (again)
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path, mode='a', driver='H5FD_CORE',
driver_core_backing_store=0)
store['df'] = df
store.append('df2', df)
tm.assert_frame_equal(store['df'], df)
tm.assert_frame_equal(store['df2'], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
pytest.raises(KeyError, store.get, 'b')
@pytest.mark.parametrize('where, expected', [
('/', {
'': ({'first_group', 'second_group'}, set()),
'/first_group': (set(), {'df1', 'df2'}),
'/second_group': ({'third_group'}, {'df3', 's1'}),
'/second_group/third_group': (set(), {'df4'}),
}),
('/second_group', {
'/second_group': ({'third_group'}, {'df3', 's1'}),
'/second_group/third_group': (set(), {'df4'}),
})
])
def test_walk(self, where, expected):
# GH10143
objs = {
'df1': pd.DataFrame([1, 2, 3]),
'df2': pd.DataFrame([4, 5, 6]),
'df3': pd.DataFrame([6, 7, 8]),
'df4': pd.DataFrame([9, 10, 11]),
's1': pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
'a1': np.array([[1, 2, 3], [4, 5, 6]]),
'tb1': np.array([(1, 2, 3), (4, 5, 6)], dtype='i,i,i'),
'tb2': np.array([(7, 8, 9), (10, 11, 12)], dtype='i,i,i')
}
with ensure_clean_store('walk_groups.hdf', mode='w') as store:
store.put('/first_group/df1', objs['df1'])
store.put('/first_group/df2', objs['df2'])
store.put('/second_group/df3', objs['df3'])
store.put('/second_group/s1', objs['s1'])
store.put('/second_group/third_group/df4', objs['df4'])
# Create non-pandas objects
store._handle.create_array('/first_group', 'a1', objs['a1'])
store._handle.create_table('/first_group', 'tb1', obj=objs['tb1'])
store._handle.create_table('/second_group', 'tb2', obj=objs['tb2'])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = '/'.join([path, leaf])
obj = store.get(frame_path)
if 'df' in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, 'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
pytest.raises(AttributeError, getattr, store, 'd')
for x in ['mode', 'path', 'handle', 'complib']:
pytest.raises(AttributeError, getattr, store, x)
# not stores
for x in ['mode', 'path', 'handle', 'complib']:
getattr(store, "_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
pytest.raises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
# _maybe_remove(store, 'f')
# pytest.raises(ValueError, store.put, 'f', df[10:],
# append=True)
# can't put to a table (use append instead)
pytest.raises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
["I am a very long string index: %s" % i
for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
@td.skip_if_windows_python_3
def test_put_compression_blosc(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_complibs_default_settings(self):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complevel=9)
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'zlib'
# Set complib and check to see if compression is disabled
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complib='zlib')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(self.path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append('dfc', df, complevel=9, complib='blosc')
store.append('df', df)
store.close()
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'blosc'
def test_complibs(self):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version('lzo'):
all_complibs.remove('lzo')
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(self.path) as tmpfile:
gname = 'foo'
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode='r')
for node in h5table.walk_nodes(where='/' + gname,
classname='Leaf'):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
@xfail_non_writeable
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put('df', df)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self):
with ensure_clean_store(self.path) as store:
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.loc[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({
'u08': Series(np.random.randint(0, high=255, size=5),
dtype=np.uint8),
'u16': Series(np.random.randint(0, high=65535, size=5),
dtype=np.uint16),
'u32': Series(np.random.randint(0, high=2**30, size=5),
dtype=np.uint32),
'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64)}, index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
# 64-bit indices not yet supported
store.append('uints', uint_data, data_columns=[
'u08', 'u16', 'u32'])
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
assert result.name is None
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select('ns', 'foo>60')
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select('ns', 'foo>70 and index<90')
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5, 'C'] = 'bar'
mi.set_index(['C', 'B'], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df', df, format=format)
assert_frame_equal(df, store['df'])
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeIntIndex, tm.makeDateIndex]:
check('table', index)
check('fixed', index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed', tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table', index)
check('fixed', index)
else:
# only support for fixed types (and they have a perf warning)
pytest.raises(TypeError, check, 'table', index)
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
check('fixed', index)
@pytest.mark.skipif(not is_platform_little_endian(),
reason="reason platform is not little endian")
def test_encoding(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo', B='bar'), index=range(5))
df.loc[2, 'A'] = np.nan
df.loc[3, 'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df', Term('columns=A', encoding='ascii'))
tm.assert_frame_equal(result, expected)
def test_latin_encoding(self):
if compat.PY2:
pytest.skip("[unicode] is not implemented as a table column")
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(pd.Series(val, dtype=dtype))
def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
with ensure_clean_path(self.path) as store:
s.to_hdf(store, key, format='table', encoding=encoding,
nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = s.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
assert_series_equal(s_nan, retr, check_dtype=False,
check_categorical=False)
else:
assert_series_equal(s_nan, retr)
for s in examples:
roundtrip(s)
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.loc[:, 'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, 'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.loc[:, 'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pd.set_option('io.hdf.dropna_table', False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pd.set_option('io.hdf.dropna_table', True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar'},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
with ensure_clean_path(self.path) as path:
df_with_missing.to_hdf(path, 'df_with_missing', format='table')
reloaded = read_hdf(path, 'df_with_missing')
tm.assert_frame_equal(df_with_missing, reloaded)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.iloc[:, :2], axes=['columns'])
store.append('df1', df.iloc[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', 'index=df.index[0:4]'))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select('df1',
'columns=A and index>df.index[4]')
def test_append_with_different_block_ordering(self):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df['index'] = range(10)
df['index'] += i * 10
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1] * len(df), dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index', inplace=True)
store.append('df', df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10, 2),
columns=list('AB'), dtype='float64')
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
store.append('df', df)
# store additional fields in different blocks
df['int16_2'] = Series([1] * len(df), dtype='int16')
pytest.raises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.] * len(df), dtype='float64')
pytest.raises(ValueError, store.append, 'df', df)
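# Takeaway: appends tolerate a different *ordering* of the same
# columns/blocks, but a frame carrying additional columns (and hence
# extra blocks) cannot be appended to an existing table and raises
# ValueError.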
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
pytest.raises(ValueError, store.append, 'df_new', df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index('C')
store.append('ss', df['B'], min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss'), df['B'])
# same as above, with data_columns=True
store.append('ss2', df['B'], data_columns=True,
min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss2'), df['B'])
# min_itemsize in index without appending (GH 10381)
store.put('ss3', df, format='table',
min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
store.append('ss3', df2)
tm.assert_frame_equal(store.select('ss3'),
pd.concat([df, df2]))
# same as above, with a Series
store.put('ss4', df['B'], format='table',
min_itemsize={'index': 6})
store.append('ss4', df2['B'])
tm.assert_series_equal(store.select('ss4'),
pd.concat([df['B'], df2['B']]))
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.loc[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.loc[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
df = DataFrame(dict(A='foo', B='bar'), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['A']
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['B', 'A']
# min_itemsize keyed on 'values' applies to the data column and block
_maybe_remove(store, 'df')
store.append('df', df, data_columns=[
'B'], min_itemsize={'values': 200})
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
assert store.get_storer('df').data_columns == ['B']
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo', 'foo', 'foo', 'barh',
'barh', 'barh'], columns=['A'])
_maybe_remove(store, 'df')
pytest.raises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
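# Background for the checks above: string columns in table format are
# stored as fixed-width PyTables columns, and ``min_itemsize`` reserves
# that width up front, keyed by column name, by the special 'values'
# block key, or as a single int applied everywhere.  Without it, a
# longer string on a later append raises ValueError.  Sketch:
#   store.append('k', df, min_itemsize={'x': 30})  # room for 30 chars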
def test_append_with_empty_string(self):
with ensure_clean_store(self.path) as store:
# with all empty strings (GH 12242)
df = DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', '']})
store.append('df', df[:-1], min_itemsize={'x': 1})
store.append('df', df[-1:], min_itemsize={'x': 1})
tm.assert_frame_equal(store.select('df'), df)
def test_to_hdf_with_min_itemsize(self):
with ensure_clean_path(self.path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index('C')
df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
df2.to_hdf(path, 'ss3', append=True, format='table')
tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
pd.concat([df, df2]))
# same as above, with a Series
df['B'].to_hdf(path, 'ss4', format='table',
min_itemsize={'index': 6})
df2['B'].to_hdf(path, 'ss4', append=True, format='table')
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
@pytest.mark.parametrize(
"format",
[pytest.param('fixed', marks=xfail_non_writeable),
'table'])
def test_to_hdf_errors(self, format):
data = ['\ud800foo']
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(self.path) as path:
# GH 20835
ser.to_hdf(path, 'table', format=format, errors='surrogatepass')
result = pd.read_hdf(path, 'table', errors='surrogatepass')
tm.assert_series_equal(result, ser)
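# Note (GH 20835): the ``errors`` argument is forwarded to the
# underlying string codec, so data that strict UTF-8 would reject
# (here, a lone surrogate) round-trips when 'surrogatepass' is passed
# to both to_hdf and read_hdf.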
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc('B')] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', 'B>0')
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', 'B>0 and index>df.index[3]')
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', "string='foo'")
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'],
min_itemsize={'string': 30, 'string2': 40,
'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc('A')] = 1.
df_new.iloc[0, df_new.columns.get_loc('B')] = -1.
df_new['string'] = 'foo'
sl = df_new.columns.get_loc('string')
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = 'bar'
df_new['string2'] = 'foo'
sl = df_new.columns.get_loc('string2')
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df',
"string='foo' and string2='foo'"
" and A>0 and B<0")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', "string='foo' and string2='cool'")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
(df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
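# Recap: columns listed in ``data_columns`` (or all columns, with
# ``data_columns=True``) are written out as individual queryable
# columns, so ``where`` terms can reference them directly:
#   store.append('k', df, data_columns=['B', 'string'])
#   store.select('k', 'B > 0 & string == "foo"')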
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'],
data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
pytest.raises(TypeError, store.create_table_index, 'f2')
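# Note: by default the index and every data column are indexed by
# PyTables on append; passing ``index=[...]`` restricts which columns
# receive an index, and ``create_table_index`` only applies to
# table-format nodes, hence the TypeError on the fixed-format 'f2'.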
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path, 'df', format='table')
result = read_hdf(path, 'df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
('B', 'a'), ('B', 'b')],
names=['first', 'second'])
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df', df)
tm.assert_frame_equal(store['df'], expected,
check_index_type=True,
check_column_type=True)
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
pytest.raises(ValueError, store.put, 'df2', df,
format='table', data_columns=['A'])
pytest.raises(ValueError, store.put, 'df3', df,
format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4),
columns=Index(list('ABCD'), name='foo'))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index())
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', None, None]))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date', None, None]))
store.append('s', s)
xp = Series(np.zeros(12), index=make_index(
['date', 'level_1', 'level_2']))
tm.assert_series_equal(store.select('s'), xp)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 'a', 't']))
pytest.raises(ValueError, store.append, 'df', df)
# dup within level
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'],
index=make_index(['date', 'date', 'date']))
pytest.raises(ValueError, store.append, 'df', df)
# fully named
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 's', 't']))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
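# Recap: unnamed MultiIndex levels are persisted under generated names
# ('level_1', 'level_2', ... as asserted above), while a level name
# that duplicates a column name, or another level name, raises
# ValueError on append.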
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select(
'df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
def test_mi_data_columns(self):
# GH 14435
idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
range(5)], names=['date', 'id'])
df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=True)
actual = store.select('df', where='id == 1')
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df)
pytest.raises(TypeError, store.select, 'df', columns=['A'])
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
@xfail_non_writeable
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize tests for append
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path, mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result, obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df', df_empty)
pytest.raises(KeyError, store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
store.append('df', df)
assert_frame_equal(store.select('df'), df)
store.append('df', df_empty)
assert_frame_equal(store.select('df'), df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
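# Note: ``chunksize`` and ``expectedrows`` only shape how the write is
# batched and allocated; the stored result is identical either way.
# An append of an all-empty frame is a no-op, which is why selecting
# 'df' right after the first (empty) append above raises KeyError: the
# node was never created.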
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
pytest.raises(TypeError, store.append, 'df', df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# directly ndarray
pytest.raises(TypeError, store.append, 'df', np.arange(10))
# series directly
pytest.raises(TypeError, store.append,
'df', Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df', df)
df['foo'] = 'foo'
pytest.raises(ValueError, store.append, 'df', df)
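# Takeaway from the failure cases above: table format cannot serialise
# object columns holding non-string values (lists, or datetimes mixed
# with NaN), a bare ndarray is rejected with TypeError, and appending a
# frame whose columns no longer match the existing table raises
# ValueError.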
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
pytest.raises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
# incompatible dtype
pytest.raises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(
np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame({c: Series(np.random.randint(5), dtype=c)
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']})
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
dtypes = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
dtypes.append(('unicode', u('\\u03c3')))
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
pytest.raises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
@xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion('1.15.0'),
reason=("Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"))
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01',
datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(
Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed', s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table', s)
result = store.select('table')
assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self):
# GH 17618
time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='fixed')
recons = store['frame']
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
df.loc[3:5, 'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df')
assert_frame_equal(result, df)
result = store.select('df', where="C<100000")
assert_frame_equal(result, df)
result = store.select('df', where="C<pd.Timedelta('-3D')")
assert_frame_equal(result, df.iloc[3:])
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df', "C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result, df.iloc[6:])
result = store.select('df', "C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2', df)
result = store.select('df2')
assert_frame_equal(result, df)
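# Note: timedelta64 values are stored internally as nanosecond
# integers, and ``where`` comparisons accept Timedelta-convertible
# strings, so terms like "C<'-3D'" or "C<'-500000s'" work as shown
# above.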
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
assert len(store) == 1
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
assert len(store) == 0
# nonexistence
pytest.raises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
assert len(store) == 1
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
assert len(store) == 1
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
assert len(store) == 0
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[0:4, 'string'] = 'bar'
store.put('df', df, format='table')
# some invalid terms
pytest.raises(TypeError, Term)
# more invalid
pytest.raises(
ValueError, store.select, 'df', 'df.index[3]')
pytest.raises(SyntaxError, store.select, 'df', 'index>')
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table', data_columns=True)
# check ok
read_hdf(path, 'dfq',
where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, 'dfq', where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table')
pytest.raises(ValueError, read_hdf, path,
'dfq', where="A>0 or C>0")
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),
index=pd.date_range('20130101', periods=20))
store.put('df', df, format='table')
expected = df[df.index > pd.Timestamp('20130105')]
import datetime # noqa
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
result = store.select('df', 'index>datetime(2013,1,5)')
assert_frame_equal(result, expected)
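# Recap: ``where`` strings are evaluated with access to the caller's
# scope, so local names such as the ``datetime`` module and the
# ``datetime`` class above can be referenced directly inside the query.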
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
def test_sparse_series(self):
s = tm.makeStringSeries()
s.iloc[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.iloc[3:5, 1:3] = np.nan
s.iloc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
@xfail_non_writeable
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal)
@xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
pytest.skip('known failure on some Windows platforms')
@xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_frame(self, compression):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=compression)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=compression)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=compression)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
@xfail_non_writeable
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
@xfail_non_writeable
@pytest.mark.parametrize(
'dtype', [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]'])
def test_empty_series(self, dtype):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self):
# GH 13884
df = pd.DataFrame({'A': [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize('UTC')
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
recons = store['frame']
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize('table_format', ['table', 'fixed'])
def test_store_index_name_numpy_str(self, table_format):
# GH #13492
idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1),
datetime.date(2000, 1, 2)]),
name=u('cols\u05d2'))
idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1),
datetime.date(2010, 1, 2)]),
name=u('rows\u05d0'))
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format=table_format)
df2 = read_hdf(path, 'df')
assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == text_type
assert type(df2.columns.name) == text_type
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
tm.assert_series_equal(recons, series)
@xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_store_mixed(self, compression):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=compression)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=['A'])
expected = df.loc[:, ['A']]
assert_frame_equal(result, expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['A']]
result = store.select('df', columns=['A'])
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['B', 'A']]
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df', df)
store.append('df', df)
expected = df.loc[:, ['B', 'A']]
expected = concat([expected, expected])
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
arr = np.random.binomial(n=1, p=.01, size=(1000, 10))
df = DataFrame(arr).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1000)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed
# (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
def test_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# equivalently, as a where term
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but selecting different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(
ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
df['object'] = 'foo'
df.loc[4:5, 'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
result = store.select('df', 'boolv == %s' % str(v),
columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
result = store.select(
'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
store.append('df1', df, data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values'] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values'] > 2.0]
store.append('df2', df, data_columns=True, index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position: floats with NaN are ok too
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values'] > 2.0]
store.append('df4', df, data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
expected = df[df['A'] > 0]
store.append('df', df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select('df', where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +
['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select('df', 'B=selector')
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', 'ts=selector')
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [s for s in store.select('df', iterator=True)]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=100)]
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df_non_table')
pytest.raises(TypeError, read_hdf, path,
'df_non_table', chunksize=100)
pytest.raises(TypeError, read_hdf, path,
'df_non_table', iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df', format='table')
results = [s for s in read_hdf(path, 'df', chunksize=100)]
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, 'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1', df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(
columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2', df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [s for s in store.select('df', chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non-complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# the whole range fits in a single chunk
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom this should return []:
# iterating over an empty list yields nothing.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be []
assert len(results) == 0
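# Summary (GH 8014): with ``chunksize`` the iterator is built over the
# *selected* coordinates rather than the whole table, so a where clause
# whose matches fit in one chunk yields exactly one frame, and a clause
# matching nothing yields an empty list rather than a single empty
# frame.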
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1', periods=3, freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df, result)
for attr in ['freq', 'tz', 'name']:
for idx in ['index', 'columns']:
assert (getattr(getattr(df, idx), attr, None) ==
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
periods=3, freq='D'))))
store.append('data', df2)
assert store.get_storer('data').info['index']['freq'] is None
# this is ok
_maybe_remove(store, 'df2')
df2 = DataFrame(dict(
A=Series(lrange(3),
index=[Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20020101')])))
store.append('df2', df2)
df3 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
store.append('df2', df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1',
periods=3, freq='H'))))
df.to_hdf(path, 'data', mode='w', append=True)
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
df2.to_hdf(path, 'data', append=True)
idx = date_range('2000-1-1', periods=3, freq='H')
idx.name = 'foo'
df = DataFrame(dict(A=Series(lrange(3), index=idx)))
df.to_hdf(path, 'data', mode='w', append=True)
assert read_hdf(path, 'data').index.name == 'foo'
with catch_warnings(record=True):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
df2.to_hdf(path, 'data', append=True)
assert read_hdf(path, 'data').index.name is None
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
assert crit1.env.scope['date'] == date
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.loc[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.loc[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
pytest.raises(
ValueError, store.select, 'df_time', "index>0")
# can't select if not written as table
# store['frame'] = df
# pytest.raises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4], 'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & '
'index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index > df.index[3]) & (
df.index <= df.index[6])) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string != 'bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
pytest.raises(NotImplementedError,
store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(['A', 'B'])]
tm.assert_frame_equal(result, expected)
# in
result = store.select(
'df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=[
'A', 'B'])
tm.assert_frame_equal(result, expected)
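# Recap: terms combine with '&', '|' and parentheses; '~' is only
# supported for column filters such as ~(columns=[...]) because numexpr
# provides no general invert here, and "columns in [...]" acts as a
# column filter layered on top of the row selection.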
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({'A': [1, 1, 2, 2, 3]})
parms.to_hdf(pp, 'df', mode='w',
format='table', data_columns=['A'])
selection = read_hdf(pp, 'df', where='A=[2,3]')
hist = DataFrame(np.random.randn(25, 1),
columns=['data'],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5)
for j in range(5)],
names=['l1', 'l2']))
hist.to_hdf(hh, 'df', mode='w', format='table')
expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
# scope with a list-like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, 'df', where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, 'df', where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with an index
store = HDFStore(hh)
result = store.select('df', where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
# not implemented
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.loc[2:7, 'x'] = ''
store.append('df', df, data_columns=['x'])
result = store.select('df', 'x=none')
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
result = store.select('df', 'x!=none')
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
pprint_thing("[{0}]".format(detail))
pprint_thing(store)
pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
result = store.select('df2', 'x!=none')
expected = df2[isna(df2.x)]
assert_frame_equal(result, expected)
# int ==/!=
df['int'] = 1
df.loc[2:7, 'int'] = 2
store.append('df3', df, data_columns=['int'])
result = store.select('df3', 'int=2')
expected = df[df.int == 2]
assert_frame_equal(result, expected)
result = store.select('df3', 'int!=2')
expected = df[df.int != 2]
assert_frame_equal(result, expected)
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# GH 17912
# HDFStore.select_column should raise a KeyError
# if the requested key does not exist in the store
with pytest.raises(KeyError,
match='No object named df in the file'):
store.select_column('df', 'index')
store.append('df', df)
# error
pytest.raises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where=['index>5'])
pytest.raises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
pytest.raises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.loc[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
store.append('df4', df4, data_columns=True)
expected = df4['B']
result = store.select_column('df4', 'B')
tm.assert_series_equal(result, expected)
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all())
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all())
result = store.select('df', where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all())
result = store.select('df', where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20000101', periods=1000))
store.append('df', df)
c = store.select_column('df', 'index')
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# invalid
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df), dtype='float64'))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df) + 1))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5)
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range('20000101', periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append('df2', df)
result = store.select('df2', where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2', where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
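# Sketch of the coordinate round trip used above: select_as_coordinates
# returns row positions (an Index) that can be fed back into select on the
# same table, or on another table with identical row alignment.
#
#     c = store.select_as_coordinates('df', ['index < 3'])
#     rows = store.select('df', where=c)  # equivalent to selecting directly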
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
pytest.raises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df,
selector='df3')
pytest.raises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
pytest.raises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
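# Shape of the sharding shown above (table names are illustrative): the dict
# maps each target table to the columns it receives, with None meaning "all
# remaining columns"; `selector` names the table the where-clause runs on.
#
#     store.append_to_multiple({'left': ['A', 'B'], 'right': None}, df,
#                              selector='left')
#     joined = store.select_as_multiple(['left', 'right'], where=['A>0'],
#                                       selector='left')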
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
@pytest.mark.xfail(run=False,
reason="append_to_multiple_dropna_false "
"is not raising as failed")
def test_append_to_multiple_dropna_false(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',
dropna=False)
with pytest.raises(ValueError):
store.select_as_multiple(['df1a', 'df2a'])
assert not store.select('df1a').index.equals(
store.select('df2a').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
pytest.raises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df2'], where=['A>0', 'B>0'],
selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
['df1', 'df2'], where='index>df2.index[4]', selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
pytest.raises(ValueError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion('3.1.0'),
reason=("tables version does not support fix for nan selection "
"bug: GH 4858"))
def test_nan_selection_bug_4858(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)),
dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[
3., 4., 5.]), index=[3, 4, 5])
# write w/o the index on that particular column
store.append('df', df, data_columns=True, index=['cols'])
result = store.select('df', where='values>2.0')
assert_frame_equal(result, expected)
def test_start_stop_table(self):
with ensure_clean_store(self.path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self):
# GH 16209
with ensure_clean_store(self.path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple({'selector': ['foo'], 'data': None}, df,
selector='selector')
result = store.select_as_multiple(['selector', 'data'],
selector='selector', start=0,
stop=1)
expected = df.loc[[0], ['foo', 'bar']]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
# fixed, GH 8287
df = DataFrame(dict(A=np.random.rand(20),
B=np.random.rand(20)),
index=pd.date_range('20130101', periods=20))
store.put('df', df)
result = store.select(
'df', start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select(
'df', start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put('s', s)
result = store.select('s', start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select('s', start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
dfs = df.to_sparse()
store.put('dfs', dfs)
with pytest.raises(NotImplementedError):
store.select('dfs', start=0, stop=5)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = 'columns=df.columns[:75]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = 'columns=df.columns[:75:2]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop):
# GH 17021
# ValueError when reading a contiguous mixed-data table with a VLArray
df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
'b': Series(['ab', 'cd', 'ab'])})
with ensure_clean_store(self.path) as store:
store.append('test_dataset', df)
result = store.select('test_dataset', start=start, stop=stop)
assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
comparator(retrieved, obj)
def test_multiple_open_close(self):
# gh-4409: open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
# single
store = HDFStore(path)
assert 'CLOSED' not in store.info()
assert store.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
pytest.raises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert 'CLOSED' not in store1.info()
assert 'CLOSED' not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert 'CLOSED' in store1.info()
assert not store1.is_open
assert 'CLOSED' not in store2.info()
assert store2.is_open
store2.close()
assert 'CLOSED' in store1.info()
assert 'CLOSED' in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store2.append('df2', df)
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
store = HDFStore(path)
store.close()
pytest.raises(ClosedFileError, store.keys)
pytest.raises(ClosedFileError, lambda: 'df' in store)
pytest.raises(ClosedFileError, lambda: len(store))
pytest.raises(ClosedFileError, lambda: store['df'])
pytest.raises(AttributeError, lambda: store.df)
pytest.raises(ClosedFileError, store.select, 'df')
pytest.raises(ClosedFileError, store.get, 'df')
pytest.raises(ClosedFileError, store.append, 'df2', df)
pytest.raises(ClosedFileError, store.put, 'df3', df)
pytest.raises(ClosedFileError, store.get_storer, 'df2')
pytest.raises(ClosedFileError, store.remove, 'df2')
with pytest.raises(ClosedFileError, match='file is not open'):
store.select('df')
def test_pytables_native_read(self, datapath):
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf/pytables_native.h5'),
mode='r') as store:
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(PY35 and is_platform_windows(),
reason="native2 read fails oddly on windows / 3.5")
def test_pytables_native2_read(self, datapath):
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'pytables_native2.h5'),
mode='r') as store:
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
@xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'legacy_table_fixed_py2.h5'),
mode='r') as store:
result = store.select('df')
expected = pd.DataFrame([[1, 2, 3, 'D']],
columns=['A', 'B', 'C', 'D'],
index=pd.Index(['ABC'],
name='INDEX_NAME'))
assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'legacy_table_py2.h5'),
mode='r') as store:
result = store.select('table')
expected = pd.DataFrame({
"a": ["a", "b"],
"b": [2, 3]
})
assert_frame_equal(expected, result)
def test_copy(self):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
try:
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(self.path)
st = HDFStore(path)
st.append('df', df, data_columns=['A'])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
assert store['a'].index[0] == dt
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
def test_unicode_longer_encoded(self):
# GH 11234
char = '\u0394'
df = pd.DataFrame({'A': [char]})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
@xfail_non_writeable
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# pytest.raises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b',
'test & test', 'c', 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
with ensure_clean_store(self.path) as store:
# Basic
_maybe_remove(store, 's')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
_maybe_remove(store, 's_ordered')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
_maybe_remove(store, 'df')
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# Dtypes
_maybe_remove(store, 'si')
s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')
store.append('si', s)
result = store.select('si')
tm.assert_series_equal(result, s)
_maybe_remove(store, 'si2')
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')
store.append('si2', s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# Multiple
_maybe_remove(store, 'df2')
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2', df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert '/df2 ' in info
# assert '/df2/meta/values_block_0/meta' in info
assert '/df2/meta/values_block_1/meta' in info
# unordered
_maybe_remove(store, 's2')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# Query
_maybe_remove(store, 'df3')
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append('df3', df)
df = concat([df, df])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append('df3', df3)
# Remove, and make sure the metadata is removed too (the removal is
# recursive, so it should be).
result = store.select('df3/meta/s/meta')
assert result is not None
store.remove('df3')
with pytest.raises(KeyError):
store.select('df3/meta/s/meta')
def test_categorical_conversion(self):
# GH13322
# Check that read_hdf with categorical columns returns no rows when the
# where criterion is not met.
obsids = ['ESP_012345_6789', 'ESP_987654_3210']
imgids = ['APF00006np', 'APF0001imm']
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame whose dtypes match those of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype('category')
df.imgids = df.imgids.astype('category')
# We are expecting an empty DataFrame whose dtypes match those of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self):
# GH18413
# Check that categorical columns containing only NaN values survive a
# to_hdf/read_hdf round trip.
df = pd.DataFrame({
'a': ['a', 'b', 'c', np.nan],
'b': [np.nan, np.nan, np.nan, np.nan],
'c': [1, 2, 3, 4],
'd': pd.Series([None] * 4, dtype=object)
})
df['a'] = df.a.astype('category')
df['b'] = df.b.astype('category')
df['d'] = df.b.astype('category')
expected = df
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df')
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, df.to_hdf,
path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(
start='0s', periods=10, freq='1s', name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def test_columns_multiindex_modified(self):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df',
mode='a',
append=True,
data_columns=data_columns,
index=False)
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
assert cols2load_original == cols2load
@ignore_natural_naming_warning
def test_to_hdf_with_object_column_names(self):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]
types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
if compat.PY3:
types_should_run.append(tm.makeUnicodeIndex)
else:
# TODO: Add back to types_should_fail
# https://github.com/pandas-dev/pandas/issues/20907
pass
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
msg = "cannot have non-object label DataIndexableCol"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, 'df', format='table',
data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df.to_hdf(path, 'df', format='table', data_columns=True)
result = pd.read_hdf(
path, 'df', where="index = [{0}]".format(df.index[0]))
assert(len(result))
def test_read_hdf_open_store(self):
# GH10330
# No check for non-string path_or_buf, and no test of an open store
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
direct = read_hdf(path, 'df')
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
pytest.raises(IOError, read_hdf, path, 'key')
df.to_hdf(path, 'df')
store = HDFStore(path, mode='r')
store.close()
pytest.raises(IOError, read_hdf, store, 'df')
def test_read_hdf_generic_buffer_errors(self):
pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df')
def test_invalid_complib(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, 'df', complib='foolib')
# GH10443
def test_read_nokey(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_table(self):
# GH13231
df = DataFrame({'i': range(5),
'c': Series(list('abacd'), dtype='category')})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a', format='table')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a', format='table')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_empty(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path)
store.close()
pytest.raises(ValueError, read_hdf, path)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self):
# GH11773
from pathlib import Path
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self):
# GH 14241
df = pd.DataFrame({'A': [1000000000.0009,
1000000000.0011,
1000000000.0015]})
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
cutoff = 1000000000.0006
result = store.select('test', "A < %.4f" % cutoff)
assert result.empty
cutoff = 1000000000.0010
result = store.select('test', "A > %.4f" % cutoff)
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select('test', 'A == %.4f' % exact)
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self):
# GH 15492
df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'],
'real_date': date_range('2014-01-01', periods=2),
'float': [1.1, 1.2],
'int': [1, 2]},
columns=['date', 'real_date', 'float', 'int'])
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
ts = pd.Timestamp('2014-01-01') # noqa
result = store.select('test', where='real_date > ts')
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ['<', '>', '==']:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp('2014-01-01'),
pd.Timedelta(1, 's')]:
query = 'date {op} v'.format(op=op)
with pytest.raises(TypeError):
store.select('test', where=query)
# strings to other columns must be convertible to type
v = 'a'
for col in ['int', 'float', 'real_date']:
query = '{col} {op} v'.format(op=op, col=col)
with pytest.raises(ValueError):
store.select('test', where=query)
for v, col in zip(['1', '1.1', '2014-01-01'],
['int', 'float', 'real_date']):
query = '{col} {op} v'.format(op=op, col=col)
result = store.select('test', where=query)
if op == '==':
expected = df.loc[[0], :]
elif op == '>':
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize('format', ['fixed', 'table'])
def test_read_hdf_series_mode_r(self, format):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with ensure_clean_path(self.path) as path:
series.to_hdf(path, key='data', format=format)
result = pd.read_hdf(path, key='data', mode='r')
tm.assert_series_equal(result, series)
@pytest.mark.skipif(not PY36, reason="Need python 3.6")
def test_fspath(self):
with tm.ensure_clean('foo.h5') as path:
with pd.HDFStore(path) as store:
assert os.fspath(store) == str(path)
def test_read_py2_hdf_file_in_py3(self, datapath):
# GH 16781
# tests reading a PeriodIndex DataFrame written in Python2 in Python3
# the file was generated in Python 2.7 like so:
#
# df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(
# ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
# df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')
expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex(
['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'periodindex_0.20.1_x86_64_darwin_2.7.13.h5'),
mode='r') as store:
result = store['p']
assert_frame_equal(result, expected)
class TestHDFComplexValues(Base):
# GH10447
def test_complex_fixed(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_table(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', mode='w')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
@xfail_non_writeable
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_table(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
result = store.select('df', where='A>2')
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(self):
with catch_warnings(record=True):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
objs = [s, df]
comps = [tm.assert_series_equal, tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='fixed')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
with catch_warnings(record=True):
objs = [df]
comps = [tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='table')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_indexing_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex128},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
pytest.raises(TypeError, store.append,
'df', df, data_columns=['C'])
def test_complex_series_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
with ensure_clean_path(self.path) as path:
pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t')
with ensure_clean_path(self.path) as path:
s.to_hdf(path, 'obj', format='t', index=False)
reread = read_hdf(path, 'obj')
tm.assert_series_equal(s, reread)
def test_complex_append(self):
df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
'b': np.random.randn(100)})
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['b'])
store.append('df', df)
result = store.select('df')
assert_frame_equal(pd.concat([df, df], 0), result)
class TestTimezones(Base):
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(
"invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
# filename issues.
from pandas._libs.tslibs.timezones import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(
'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
expected = df[df.A >= df.A[3]]
result = store.select('df_tz', where='A>=df.A[3]')
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130603',
tz=gettz('US/Eastern'))),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('EET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('CET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',
tz='US/Eastern') +
timedelta(hours=1) * i
for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
self._compare_with_tz(store.select(
'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='US/Eastern')),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='EET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='CET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == DatetimeIndex(result.values).tz
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
def test_timezones_fixed(self):
with ensure_clean_store(self.path) as store:
# index
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, 'df')
df = DataFrame({'A': rng,
'B': rng.tz_convert('UTC').tz_localize(None),
'C': rng.tz_convert('CET'),
'D': range(len(rng))}, index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
with ensure_clean_store(self.path) as store:
with set_timezone('EST5EDT'):
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
with set_timezone('CST6CDT'):
result = store['obj1']
assert_frame_equal(result, df)
def test_legacy_datetimetz_object(self, datapath):
# legacy from < 0.17.0
# 8260
expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'datetimetz_object.h5'),
mode='r') as store:
result = store['df']
assert_frame_equal(result, expected)
def test_dst_transitions(self):
# make sure we are not failing on DST transitions
with ensure_clean_store(self.path) as store:
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10min')]:
_maybe_remove(store, 'df')
df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tseries/converter.py | 2 | 32615 | from datetime import datetime, timedelta
import datetime as pydt
import numpy as np
from dateutil.relativedelta import relativedelta
import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular
from pandas.compat import lrange
import pandas.compat as compat
import pandas.lib as lib
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.series import Series
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.tseries.period import Period, PeriodIndex
def register():
units.registry[lib.Timestamp] = DatetimeConverter()
units.registry[Period] = PeriodConverter()
units.registry[pydt.datetime] = DatetimeConverter()
units.registry[pydt.date] = DatetimeConverter()
units.registry[pydt.time] = TimeConverter()
units.registry[np.datetime64] = DatetimeConverter()
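# Minimal usage sketch (assumes matplotlib is importable): after register()
# runs, pandas timestamp-like values can be handed to matplotlib directly and
# the converters above take care of the unit mapping.
#
#     import matplotlib.pyplot as plt
#
#     register()
#     ts = Series(range(10), index=date_range('2000-01-01', periods=10))
#     plt.plot(ts.index, ts.values)  # DatetimeConverter converts the index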
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
def time2num(d):
if isinstance(d, compat.string_types):
parsed = tools.to_datetime(d)
if not isinstance(parsed, datetime):
raise ValueError('Could not parse time %s' % d)
return _to_ordinalf(parsed.time())
if isinstance(d, pydt.time):
return _to_ordinalf(d)
return d
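# Worked example of the ordinal arithmetic (values computed by hand):
# 12:34:56.500000 -> 12*3600 + 34*60 + 56 + 0.5 = 45296.5 float seconds.
#
#     time2num(pydt.time(12, 34, 56, 500000))  # 45296.5
#     time2num('12:34:56')                     # parsed first, then 45296.0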
class TimeConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
valid_types = (str, pydt.time)
if (isinstance(value, valid_types) or com.is_integer(value) or
com.is_float(value)):
return time2num(value)
if isinstance(value, Index):
return value.map(time2num)
if isinstance(value, (list, tuple, np.ndarray, Index)):
return [time2num(x) for x in value]
return value
@staticmethod
def axisinfo(unit, axis):
if unit != 'time':
return None
majloc = AutoLocator()
majfmt = TimeFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')
@staticmethod
def default_units(x, axis):
return 'time'
# time formatter
class TimeFormatter(Formatter):
def __init__(self, locs):
self.locs = locs
def __call__(self, x, pos=0):
fmt = '%H:%M:%S'
s = int(x)
us = int(round((x - s) * 1e6))  # total sub-second microseconds
ms = us // 1000
m, s = divmod(s, 60)
h, m = divmod(m, 60)
_, h = divmod(h, 24)
if us % 1000 != 0:
# '%6f'/'%3f' are not strftime directives; '%f' prints 6-digit microseconds
fmt += '.%f'
elif ms != 0:
# millisecond precision: print microseconds, drop the trailing 3 digits
return pydt.time(h, m, s, us).strftime(fmt + '.%f')[:-3]
return pydt.time(h, m, s, us).strftime(fmt)
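# Usage sketch for the formatter above (inputs are float seconds since
# midnight, as produced by time2num):
#
#     fmt = TimeFormatter([])
#     fmt(45296.0)     # '12:34:56'
#     fmt(45296.789)   # '12:34:56.789'    (millisecond precision)
#     fmt(45296.7891)  # '12:34:56.789100' (microsecond precision)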
# Period Conversion
class PeriodConverter(dates.DateConverter):
@staticmethod
def convert(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
Period, pydt.date, pydt.time)
if (isinstance(values, valid_types) or com.is_integer(values) or
com.is_float(values)):
return get_datevalue(values, axis.freq)
if isinstance(values, PeriodIndex):
return values.asfreq(axis.freq).values
if isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
if com.is_period_arraylike(values):
return PeriodIndex(values, freq=axis.freq).values
if isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
pydt.date, pydt.time)):
return Period(date, freq).ordinal
elif (com.is_integer(date) or com.is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
return date
elif date is None:
return None
raise ValueError("Unrecognizable date '%s'" % date)
def _dt_to_float_ordinal(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if (isinstance(dt, (np.ndarray, Index, Series)) and
com.is_datetime64_ns_dtype(dt)):
base = dates.epoch2num(dt.asi8 / 1.0E9)
else:
base = dates.date2num(dt)
return base
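# The datetime64[ns] fast path divides the i8 nanosecond view down to epoch
# seconds so epoch2num can produce the same float-day ordinals date2num would,
# without building per-element datetime objects. Rough equivalence check
# (illustrative only):
#
#     idx = date_range('2000-01-01', periods=3)
#     np.allclose(dates.epoch2num(idx.asi8 / 1e9),
#                 dates.date2num(idx.to_pydatetime()))  # expected True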
# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
def try_parse(values):
try:
return _dt_to_float_ordinal(tools.to_datetime(values))
except Exception:
return values
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
return _dt_to_float_ordinal(lib.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (com.is_integer(values) or com.is_float(values)):
return values
elif isinstance(values, compat.string_types):
return try_parse(values)
elif isinstance(values, (list, tuple, np.ndarray, Index)):
if isinstance(values, Index):
values = values.values
if not isinstance(values, np.ndarray):
values = com._asarray_tuplesafe(values)
if com.is_integer_dtype(values) or com.is_float_dtype(values):
return values
try:
values = tools.to_datetime(values)
if isinstance(values, Index):
values = values.map(_dt_to_float_ordinal)
else:
values = [_dt_to_float_ordinal(x) for x in values]
except Exception:
pass
return values
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = PandasAutoDateLocator(tz=tz)
majfmt = PandasAutoDateFormatter(majloc, tz=tz)
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
# matplotlib.dates._UTC lacks the _utcoffset attribute that pandas expects
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
num_days = ((delta.years * 12.0) + delta.months * 31.0) + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
def __init__(self, tz):
dates.DateLocator.__init__(self, tz)
self._interval = 1.
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = dates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
# We went through the whole loop without breaking, default to
# 1 second (an interval of 1000 ms)
self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(('MillisecondLocator estimated to generate %d '
'ticks from %s to %s: exceeds Locator.MAXTICKS '
'* 2 (%d)') %
(estimate, dmin, dmax, self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += timedelta(microseconds=1e6 - microsecond)
return dt
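# Round-trip sketch for _from_ordinal (assumes matplotlib's classic
# 0001-01-01-based epoch, where date2num returns Gregorian ordinals):
def _example_from_ordinal_roundtrip():
    original = pydt.datetime(2012, 6, 1, 18, 30)
    recovered = _from_ordinal(dates.date2num(original))
    # sub-microsecond float error is absorbed by the rounding guards above
    assert recovered == original
    return recovered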
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
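# Illustrative check of the spacing table above: spans of 15, 300 and
# 5000 years yield (minor, major) tick spacings of (1, 2), (10, 50) and
# (120, 600) years respectively.
def _example_annual_spacing():
    assert _get_default_annual_spacing(15) == (1, 2)
    assert _get_default_annual_spacing(300) == (10, 50)
    assert _get_default_annual_spacing(5000) == (120, 600)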
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1, period)
return (current - previous).nonzero()[0]
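# A small sketch of period_break: for a daily PeriodIndex spanning a
# month boundary, the only 'month' break is the index of the first day
# of the new month.
def _example_period_break():
    pidx = PeriodIndex(start='2000-01-28', end='2000-02-03', freq='D')
    assert list(period_break(pidx, 'month')) == [4]  # 2000-02-01
    return period_break(pidx, 'day')  # every element starts a new day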
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
If the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
if freq == FreqGroup.FR_NS:
periodsperday = 24 * 60 * 60 * 1000000000
elif freq == FreqGroup.FR_US:
periodsperday = 24 * 60 * 60 * 1000000
elif freq == FreqGroup.FR_MS:
periodsperday = 24 * 60 * 60 * 1000
elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
else: # pragma: no cover
raise ValueError("unexpected frequency: %s" % freq)
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
elif freq == FreqGroup.FR_BUS:
periodsperyear = 261
periodspermonth = 19
elif freq == FreqGroup.FR_DAY:
periodsperyear = 365
periodspermonth = 28
elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (Period(ordinal=int(vmin), freq=freq),
Period(ordinal=int(vmax), freq=freq))
span = vmax.ordinal - vmin.ordinal + 1
dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
# Initialize the output
info = np.zeros(span,
dtype=[('val', np.int64), ('maj', bool),
('min', bool), ('fmt', '|S20')])
info['val'][:] = dates_.values
info['fmt'][:] = ''
info['maj'][[0, -1]] = True
# .. and set some shortcuts
info_maj = info['maj']
info_min = info['min']
info_fmt = info['fmt']
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and \
((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = period_break(dates_, 'day')
month_start = period_break(dates_, 'month')
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
_prev_hour = (dates_ - 1).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
_prev_minute = (dates_ - 1).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
_prev_second = (dates_ - 1).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[second_start & (_second %
label_interval == 0)] = '%H:%M:%S'
info_fmt[day_start] = '%H:%M:%S\n%d-%b'
info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'
if span < periodsperday / 12000.0:
_second_finder(1)
elif span < periodsperday / 6000.0:
_second_finder(2)
elif span < periodsperday / 2400.0:
_second_finder(5)
elif span < periodsperday / 1200.0:
_second_finder(10)
elif span < periodsperday / 800.0:
_second_finder(15)
elif span < periodsperday / 400.0:
_second_finder(30)
elif span < periodsperday / 150.0:
_minute_finder(1)
elif span < periodsperday / 70.0:
_minute_finder(2)
elif span < periodsperday / 24.0:
_minute_finder(5)
elif span < periodsperday / 12.0:
_minute_finder(15)
elif span < periodsperday / 6.0:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[day_start] = '%d'
info_fmt[month_start] = '%d\n%b'
info_fmt[year_start] = '%d\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = period_break(dates_, 'month')
info_maj[month_start] = True
if freq < FreqGroup.FR_HR:
info['min'] = True
else:
day_start = period_break(dates_, 'day')
info['min'][day_start] = True
week_start = period_break(dates_, 'week')
year_start = period_break(dates_, 'year')
info_fmt[week_start] = '%d'
info_fmt[month_start] = '\n\n%b'
info_fmt[year_start] = '\n\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
week_start = period_break(dates_, 'week')
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = '%b\n%Y'
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
month_start = period_break(dates_, 'month')
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
# Case 5. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
# Case 6. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = '%Y'
# Case 7. More than 11 years ................
else:
year_start = period_break(dates_, 'year')
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = '%Y'
return info
def _monthly_finder(vmin, vmax, freq):
periodsperyear = 12
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
dates_ = info['val']
info['fmt'] = ''
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info['maj']
info_fmt = info['fmt']
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = '%b\n%Y'
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
info['fmt'][quarter_start] = True
info['min'] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info['min'][quarter_start] = True
info_fmt[year_start] = '%Y'
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%Y'
return info
def _quarterly_finder(vmin, vmax, freq):
periodsperyear = 4
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
info_maj = info['maj']
info_fmt = info['fmt']
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = 'Q%q'
info_fmt[year_start] = 'Q%q\n%F'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = 'Q%q\n%F'
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[year_start] = '%F'
else:
years = dates_[year_start] // 4 + 1
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%F'
return info
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
info['maj'][major_idx] = True
info['min'][(dates_ % min_anndef == 0)] = True
info['fmt'][major_idx] = '%Y'
return info
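# Each *_finder above returns a numpy structured array with one row per
# period between vmin and vmax: 'val' holds the period ordinal, 'maj' and
# 'min' flag major/minor ticks, and 'fmt' holds the (possibly empty)
# strftime-style label. A sketch using the annual finder, whose freq
# argument is accepted but unused:
def _example_finder_output():
    info = _annual_finder(2000, 2005, None)
    assert set(info.dtype.names) == {'val', 'maj', 'min', 'fmt'}
    return info['val'], info['fmt']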
def get_finder(freq):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
fgroup = frequencies.get_freq_group(freq)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
else: # pragma: no cover
errmsg = "Unsupported frequency: %s" % (freq)
raise NotImplementedError(errmsg)
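# Dispatch sketch for get_finder with string frequencies (resolved through
# frequencies.get_freq): annual, quarterly and monthly map to their own
# finders, while daily and weekly share _daily_finder.
def _example_get_finder():
    assert get_finder('A') is _annual_finder
    assert get_finder('Q') is _quarterly_finder
    assert get_finder('M') is _monthly_finder
    assert get_finder('D') is _daily_finder
    assert get_finder('W') is _daily_finder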
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
Parameters
----------
freq : {var}
Valid frequency specifier.
minor_locator : {False, True}, optional
Whether the locator is for minor ticks (True) or not.
dynamic_mode : {True, False}, optional
Whether the locator should work in dynamic mode.
base : {int}, optional
quarter : {int}, optional
month : {int}, optional
day : {int}, optional
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
base=1, quarter=1, month=1, day=1, plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.freq = freq
self.base = base
(self.quarter, self.month, self.day) = (quarter, month, day)
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def __call__(self):
'Return the locations of the ticks.'
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
vmin, vmax = vi
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
locs = lrange(vmin, vmax + 1, base)
return locs
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`PeriodIndex`.
Parameters
----------
freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
dynamic_mode : {True, False}
Whether the formatter works in dynamic mode or not.
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.format = None
self.freq = freq
self.locs = []
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = dict([(x, f) for (x, _, _, f) in format])
return self.formatdict
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
| mit |
tjlaboss/openmc | openmc/data/photon.py | 8 | 44955 | from collections import OrderedDict
from collections.abc import Mapping, Callable
from copy import deepcopy
from io import StringIO
from math import pi
from numbers import Integral, Real
import os
import h5py
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from . import HDF5_VERSION, HDF5_VERSION_MAJOR
from .ace import Table, get_metadata, get_table
from .data import ATOMIC_SYMBOL, EV_PER_MEV
from .endf import Evaluation, get_head_record, get_tab1_record, get_list_record
from .function import Tabulated1D
# Constants
MASS_ELECTRON_EV = 0.5109989461e6 # Electron mass energy
PLANCK_C = 1.2398419739062977e4 # Planck's constant times c in eV-Angstroms
FINE_STRUCTURE = 137.035999139 # Inverse fine structure constant
CM_PER_ANGSTROM = 1.0e-8
# classical electron radius in cm
R0 = CM_PER_ANGSTROM * PLANCK_C / (2.0 * pi * FINE_STRUCTURE * MASS_ELECTRON_EV)
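# Illustrative sanity check: the expression above reproduces the classical
# electron radius, approximately 2.8179403e-13 cm.
assert abs(R0 / 2.8179403e-13 - 1.0) < 1e-6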
# Electron subshell labels
_SUBSHELLS = [None, 'K', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5',
'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2', 'O3',
'O4', 'O5', 'O6', 'O7', 'O8', 'O9', 'P1', 'P2', 'P3', 'P4',
'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'Q1', 'Q2', 'Q3']
_REACTION_NAME = {
501: ('Total photon interaction', 'total'),
502: ('Photon coherent scattering', 'coherent'),
504: ('Photon incoherent scattering', 'incoherent'),
515: ('Pair production, electron field', 'pair_production_electron'),
516: ('Total pair production', 'pair_production_total'),
517: ('Pair production, nuclear field', 'pair_production_nuclear'),
522: ('Photoelectric absorption', 'photoelectric'),
525: ('Heating', 'heating'),
526: ('Electro-atomic scattering', 'electro_atomic_scat'),
527: ('Electro-atomic bremsstrahlung', 'electro_atomic_brem'),
528: ('Electro-atomic excitation', 'electro_atomic_excit'),
534: ('K (1s1/2) subshell photoelectric', 'K'),
535: ('L1 (2s1/2) subshell photoelectric', 'L1'),
536: ('L2 (2p1/2) subshell photoelectric', 'L2'),
537: ('L3 (2p3/2) subshell photoelectric', 'L3'),
538: ('M1 (3s1/2) subshell photoelectric', 'M1'),
539: ('M2 (3p1/2) subshell photoelectric', 'M2'),
540: ('M3 (3p3/2) subshell photoelectric', 'M3'),
541: ('M4 (3d3/2) subshell photoelectric', 'M4'),
542: ('M5 (3d5/2) subshell photoelectric', 'M5'),
543: ('N1 (4s1/2) subshell photoelectric', 'N1'),
544: ('N2 (4p1/2) subshell photoelectric', 'N2'),
545: ('N3 (4p3/2) subshell photoelectric', 'N3'),
546: ('N4 (4d3/2) subshell photoelectric', 'N4'),
547: ('N5 (4d5/2) subshell photoelectric', 'N5'),
548: ('N6 (4f5/2) subshell photoelectric', 'N6'),
549: ('N7 (4f7/2) subshell photoelectric', 'N7'),
550: ('O1 (5s1/2) subshell photoelectric', 'O1'),
551: ('O2 (5p1/2) subshell photoelectric', 'O2'),
552: ('O3 (5p3/2) subshell photoelectric', 'O3'),
553: ('O4 (5d3/2) subshell photoelectric', 'O4'),
554: ('O5 (5d5/2) subshell photoelectric', 'O5'),
555: ('O6 (5f5/2) subshell photoelectric', 'O6'),
556: ('O7 (5f7/2) subshell photoelectric', 'O7'),
557: ('O8 (5g7/2) subshell photoelectric', 'O8'),
558: ('O9 (5g9/2) subshell photoelectric', 'O9'),
559: ('P1 (6s1/2) subshell photoelectric', 'P1'),
560: ('P2 (6p1/2) subshell photoelectric', 'P2'),
561: ('P3 (6p3/2) subshell photoelectric', 'P3'),
562: ('P4 (6d3/2) subshell photoelectric', 'P4'),
563: ('P5 (6d5/2) subshell photoelectric', 'P5'),
564: ('P6 (6f5/2) subshell photoelectric', 'P6'),
565: ('P7 (6f7/2) subshell photoelectric', 'P7'),
566: ('P8 (6g7/2) subshell photoelectric', 'P8'),
567: ('P9 (6g9/2) subshell photoelectric', 'P9'),
568: ('P10 (6h9/2) subshell photoelectric', 'P10'),
569: ('P11 (6h11/2) subshell photoelectric', 'P11'),
570: ('Q1 (7s1/2) subshell photoelectric', 'Q1'),
571: ('Q2 (7p1/2) subshell photoelectric', 'Q2'),
572: ('Q3 (7p3/2) subshell photoelectric', 'Q3')
}
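# Each entry maps an ENDF MT number to a (human-readable label, HDF5 group
# key) pair; the subshell photoelectric reactions occupy MT = 534-572.
# A small illustrative lookup:
def _example_reaction_name():
    label, key = _REACTION_NAME[502]
    assert key == 'coherent'
    assert _REACTION_NAME[534][1] == 'K'
    return label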
# Compton profiles are read from a pre-generated HDF5 file when they are first
# needed. The dictionary stores an array of electron momentum values (at which
# the profiles are tabulated) with the key 'pz' and the profile for each element
# is a 2D array with shape (n_shells, n_momentum_values) stored on the key Z
_COMPTON_PROFILES = {}
# Scaled bremsstrahlung DCSs are read from a data file provided by Seltzer and
# Berger when they are first needed. The dictionary stores an array of n
# incident electron kinetic energies with key 'electron_energies', an array of
# k reduced photon energies with key 'photon_energies', and the cross sections
# for each element are in a 2D array with shape (n, k) stored on the key 'Z'.
# It also stores data used for calculating the density effect correction and
# stopping power, namely, the mean excitation energy with the key 'I', number
# of electrons per subshell with the key 'num_electrons', and binding energies
# with the key 'ionization_energy'.
_BREMSSTRAHLUNG = {}
class AtomicRelaxation(EqualityMixin):
"""Atomic relaxation data.
This class stores the binding energy, number of electrons, and electron
transitions possible from ionization for each electron subshell of an
atom. All of the data originates from an ENDF-6 atomic relaxation
sub-library (NSUB=6). Instances of this class are not normally instantiated
directly but rather created using the factory method
:meth:`AtomicRelaxation.from_endf`.
Parameters
----------
binding_energy : dict
Dictionary indicating the binding energy in eV (values) for given
subshells (keys). The subshells should be given as strings, e.g., 'K',
'L1', 'L2', etc.
num_electrons : dict
Dictionary indicating the number of electrons in a subshell when neutral
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc.
transitions : dict of str to pandas.DataFrame
Dictionary indicating allowed transitions and their probabilities
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
a DataFrame with columns indicating the secondary and tertiary subshell,
the energy of the transition in eV, and the fractional probability of
the transition.
Attributes
----------
binding_energy : dict
Dictionary indicating the binding energy in eV (values) for given
subshells (keys). The subshells should be given as strings, e.g., 'K',
'L1', 'L2', etc.
num_electrons : dict
Dictionary indicating the number of electrons in a subshell when neutral
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc.
transitions : dict of str to pandas.DataFrame
Dictionary indicating allowed transitions and their probabilities
(values) for given subshells (keys). The subshells should be given as
strings, e.g., 'K', 'L1', 'L2', etc. The transitions are represented as
a DataFrame with columns indicating the secondary and tertiary subshell,
the energy of the transition in eV, and the fractional probability of
the transition.
See Also
--------
IncidentPhoton
"""
def __init__(self, binding_energy, num_electrons, transitions):
self.binding_energy = binding_energy
self.num_electrons = num_electrons
self.transitions = transitions
self._e_fluorescence = {}
@property
def binding_energy(self):
return self._binding_energy
@property
def num_electrons(self):
return self._num_electrons
@property
def subshells(self):
return list(sorted(self.binding_energy.keys()))
@property
def transitions(self):
return self._transitions
@binding_energy.setter
def binding_energy(self, binding_energy):
cv.check_type('binding energies', binding_energy, Mapping)
for subshell, energy in binding_energy.items():
cv.check_value('subshell', subshell, _SUBSHELLS)
cv.check_type('binding energy', energy, Real)
cv.check_greater_than('binding energy', energy, 0.0, True)
self._binding_energy = binding_energy
@num_electrons.setter
def num_electrons(self, num_electrons):
cv.check_type('number of electrons', num_electrons, Mapping)
for subshell, num in num_electrons.items():
cv.check_value('subshell', subshell, _SUBSHELLS)
cv.check_type('number of electrons', num, Real)
cv.check_greater_than('number of electrons', num, 0.0, True)
self._num_electrons = num_electrons
@transitions.setter
def transitions(self, transitions):
cv.check_type('transitions', transitions, Mapping)
for subshell, df in transitions.items():
cv.check_value('subshell', subshell, _SUBSHELLS)
cv.check_type('transitions', df, pd.DataFrame)
self._transitions = transitions
@classmethod
def from_ace(cls, ace):
"""Generate atomic relaxation data from an ACE file
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
Returns
-------
openmc.data.AtomicRelaxation
Atomic relaxation data
"""
# Create data dictionaries
binding_energy = {}
num_electrons = {}
transitions = {}
# Get shell designators
n = ace.nxs[7]
idx = ace.jxs[11]
shells = [_SUBSHELLS[int(i)] for i in ace.xss[idx : idx+n]]
# Get number of electrons for each shell
idx = ace.jxs[12]
for shell, num in zip(shells, ace.xss[idx : idx+n]):
num_electrons[shell] = num
# Get binding energy for each shell
idx = ace.jxs[13]
for shell, e in zip(shells, ace.xss[idx : idx+n]):
binding_energy[shell] = e*EV_PER_MEV
# Get transition table
columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
idx = ace.jxs[18]
for i, subi in enumerate(shells):
n_transitions = int(ace.xss[ace.jxs[15] + i])
if n_transitions > 0:
records = []
for j in range(n_transitions):
subj = _SUBSHELLS[int(ace.xss[idx])]
subk = _SUBSHELLS[int(ace.xss[idx + 1])]
etr = ace.xss[idx + 2]*EV_PER_MEV
if j == 0:
ftr = ace.xss[idx + 3]
else:
ftr = ace.xss[idx + 3] - ace.xss[idx - 1]
records.append((subj, subk, etr, ftr))
idx += 4
# Create dataframe for transitions
transitions[subi] = pd.DataFrame.from_records(
records, columns=columns)
return cls(binding_energy, num_electrons, transitions)
@classmethod
def from_endf(cls, ev_or_filename):
"""Generate atomic relaxation data from an ENDF evaluation
Parameters
----------
ev_or_filename : str or openmc.data.endf.Evaluation
ENDF atomic relaxation evaluation to read from. If given as a
string, it is assumed to be the filename for the ENDF file.
Returns
-------
openmc.data.AtomicRelaxation
Atomic relaxation data
"""
if isinstance(ev_or_filename, Evaluation):
ev = ev_or_filename
else:
ev = Evaluation(ev_or_filename)
# Atomic relaxation data is always MF=28, MT=533
if (28, 533) not in ev.section:
raise IOError('{} does not appear to be an atomic relaxation '
'sublibrary.'.format(ev))
# Determine number of subshells
file_obj = StringIO(ev.section[28, 533])
params = get_head_record(file_obj)
n_subshells = params[4]
# Create data dictionaries
binding_energy = {}
num_electrons = {}
transitions = {}
columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
# Read data for each subshell
for i in range(n_subshells):
params, list_items = get_list_record(file_obj)
subi = _SUBSHELLS[int(params[0])]
n_transitions = int(params[5])
binding_energy[subi] = list_items[0]
num_electrons[subi] = list_items[1]
if n_transitions > 0:
# Read transition data
records = []
for j in range(n_transitions):
subj = _SUBSHELLS[int(list_items[6*(j+1)])]
subk = _SUBSHELLS[int(list_items[6*(j+1) + 1])]
etr = list_items[6*(j+1) + 2]
ftr = list_items[6*(j+1) + 3]
records.append((subj, subk, etr, ftr))
# Create dataframe for transitions
transitions[subi] = pd.DataFrame.from_records(
records, columns=columns)
# Return instance of class
return cls(binding_energy, num_electrons, transitions)
@classmethod
def from_hdf5(cls, group):
"""Generate atomic relaxation data from an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.AtomicRelaxation
Atomic relaxation data
"""
# Create data dictionaries
binding_energy = {}
num_electrons = {}
transitions = {}
designators = [s.decode() for s in group.attrs['designators']]
columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
for shell in designators:
# Shell group
sub_group = group[shell]
# Read subshell binding energy and number of electrons
if 'binding_energy' in sub_group.attrs:
binding_energy[shell] = sub_group.attrs['binding_energy']
if 'num_electrons' in sub_group.attrs:
num_electrons[shell] = sub_group.attrs['num_electrons']
# Read transition data
if 'transitions' in sub_group:
df = pd.DataFrame(sub_group['transitions'][()],
columns=columns)
# Replace float indexes back to subshell strings
df[columns[:2]] = df[columns[:2]].replace(
np.arange(float(len(_SUBSHELLS))), _SUBSHELLS)
transitions[shell] = df
return cls(binding_energy, num_electrons, transitions)
def to_hdf5(self, group, shell):
"""Write atomic relaxation data to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
shell : str
The subshell to write data for
"""
# Write subshell binding energy and number of electrons
group.attrs['binding_energy'] = self.binding_energy[shell]
group.attrs['num_electrons'] = self.num_electrons[shell]
# Write transition data with replacements
if shell in self.transitions:
df = self.transitions[shell].replace(
_SUBSHELLS, range(len(_SUBSHELLS)))
group.create_dataset('transitions', data=df.values.astype(float))
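# A hand-built sketch of the data layout AtomicRelaxation expects. The
# numbers below are purely illustrative, not evaluated nuclear data.
def _example_atomic_relaxation():
    columns = ['secondary', 'tertiary', 'energy (eV)', 'probability']
    transitions = {'K': pd.DataFrame.from_records(
        [('L2', None, 520.0, 0.4), ('L3', None, 522.0, 0.6)],
        columns=columns)}
    relax = AtomicRelaxation(binding_energy={'K': 543.1},
                             num_electrons={'K': 2},
                             transitions=transitions)
    assert relax.subshells == ['K']
    return relax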
class IncidentPhoton(EqualityMixin):
r"""Photon interaction data.
This class stores photo-atomic, photo-nuclear, atomic relaxation,
Compton profile, stopping power, and bremsstrahlung data assembled from
different sources. To create an instance, the factory method
:meth:`IncidentPhoton.from_endf` can be used. To add atomic relaxation or
Compton profile data, set the :attr:`IncidentPhoton.atomic_relaxation` and
:attr:`IncidentPhoton.compton_profiles` attributes directly.
Parameters
----------
atomic_number : int
Number of protons in the target nucleus
Attributes
----------
atomic_number : int
Number of protons in the target nucleus
atomic_relaxation : openmc.data.AtomicRelaxation or None
Atomic relaxation data
bremsstrahlung : dict
Dictionary of bremsstrahlung data with keys 'I' (mean excitation energy
in [eV]), 'num_electrons' (number of electrons in each subshell),
'ionization_energy' (ionization potential of each subshell),
'electron_energy' (incident electron kinetic energy values in [eV]),
'photon_energy' (ratio of the energy of the emitted photon to the
incident electron kinetic energy), and 'dcs' (cross section values in
[b]). The cross sections are in scaled form: :math:`(\beta^2/Z^2) E_k
(d\sigma/dE_k)`, where :math:`E_k` is the energy of the emitted photon.
A negative number of electrons in a subshell indicates conduction
electrons.
compton_profiles : dict
Dictionary of Compton profile data with keys 'num_electrons' (number of
electrons in each subshell), 'binding_energy' (ionization potential of
each subshell), and 'J' (Hartree-Fock Compton profile as a function of
the projection of the electron momentum on the scattering vector,
:math:`p_z` for each subshell). Note that subshell occupancies may not
match the atomic relaxation data.
reactions : collections.OrderedDict
Contains the cross sections for each photon reaction. The keys are MT
values and the values are instances of :class:`PhotonReaction`.
"""
def __init__(self, atomic_number):
self.atomic_number = atomic_number
self._atomic_relaxation = None
self.reactions = OrderedDict()
self.compton_profiles = {}
self.bremsstrahlung = {}
def __contains__(self, mt):
return mt in self.reactions
def __getitem__(self, mt):
if mt in self.reactions:
return self.reactions[mt]
else:
raise KeyError('No reaction with MT={}.'.format(mt))
def __repr__(self):
return "<IncidentPhoton: {}>".format(self.name)
def __iter__(self):
return iter(self.reactions.values())
@property
def atomic_number(self):
return self._atomic_number
@property
def atomic_relaxation(self):
return self._atomic_relaxation
@property
def name(self):
return ATOMIC_SYMBOL[self.atomic_number]
@atomic_number.setter
def atomic_number(self, atomic_number):
cv.check_type('atomic number', atomic_number, Integral)
cv.check_greater_than('atomic number', atomic_number, 0, True)
self._atomic_number = atomic_number
@atomic_relaxation.setter
def atomic_relaxation(self, atomic_relaxation):
cv.check_type('atomic relaxation data', atomic_relaxation,
AtomicRelaxation)
self._atomic_relaxation = atomic_relaxation
@classmethod
def from_ace(cls, ace_or_filename):
"""Generate incident photon data from an ACE table
Parameters
----------
ace_or_filename : str or openmc.data.ace.Table
ACE table to read from. If given as a string, it is assumed to be
the filename for the ACE file.
Returns
-------
openmc.data.IncidentPhoton
Photon interaction data
"""
# First obtain the data for the first provided ACE table/file
if isinstance(ace_or_filename, Table):
ace = ace_or_filename
else:
ace = get_table(ace_or_filename)
# Get atomic number based on name of ACE table
zaid, xs = ace.name.split('.')
if not xs.endswith('p'):
raise TypeError("{} is not a photoatomic transport ACE table.".format(ace))
Z = get_metadata(int(zaid))[2]
# Read each reaction
data = cls(Z)
for mt in (502, 504, 515, 522, 525):
data.reactions[mt] = PhotonReaction.from_ace(ace, mt)
# Get heating cross sections [eV-barn] from heating factors
# [eV per collision] by multiplying by the total cross section
data.reactions[525].xs.y *= sum([data.reactions[mt].xs.y for mt in
(502, 504, 515, 522)])
# Compton profiles
n_shell = ace.nxs[5]
if n_shell != 0:
# Get number of electrons in each shell
idx = ace.jxs[6]
data.compton_profiles['num_electrons'] = ace.xss[idx : idx+n_shell]
# Get binding energy for each shell
idx = ace.jxs[7]
e = ace.xss[idx : idx+n_shell]*EV_PER_MEV
data.compton_profiles['binding_energy'] = e
# Create Compton profile for each electron shell
profiles = []
for k in range(n_shell):
# Get number of momentum values and interpolation scheme
loca = int(ace.xss[ace.jxs[9] + k])
jj = int(ace.xss[ace.jxs[10] + loca - 1])
m = int(ace.xss[ace.jxs[10] + loca])
# Read momentum and PDF
idx = ace.jxs[10] + loca + 1
pz = ace.xss[idx : idx+m]
pdf = ace.xss[idx+m : idx+2*m]
# Create profile function
J_k = Tabulated1D(pz, pdf, [m], [jj])
profiles.append(J_k)
data.compton_profiles['J'] = profiles
# Subshell photoelectric xs and atomic relaxation data
if ace.nxs[7] > 0:
data.atomic_relaxation = AtomicRelaxation.from_ace(ace)
# Get subshell designators
n_subshells = ace.nxs[7]
idx = ace.jxs[11]
designators = [int(i) for i in ace.xss[idx : idx+n_subshells]]
# Get energy grid for subshell photoionization
n_energy = ace.nxs[3]
idx = ace.jxs[1]
energy = np.exp(ace.xss[idx : idx+n_energy])*EV_PER_MEV
# Get cross section for each subshell
idx = ace.jxs[16]
for d in designators:
# Create photon reaction
mt = 533 + d
rx = PhotonReaction(mt)
data.reactions[mt] = rx
# Store cross section, determining threshold
xs = ace.xss[idx : idx+n_energy].copy()
nonzero = (xs != 0.0)
xs[nonzero] = np.exp(xs[nonzero])
threshold = np.where(xs > 0.0)[0][0]
rx.xs = Tabulated1D(energy[threshold:], xs[threshold:],
[n_energy - threshold], [5])
idx += n_energy
# Copy binding energy
shell = _SUBSHELLS[d]
e = data.atomic_relaxation.binding_energy[shell]
rx.subshell_binding_energy = e
else:
raise ValueError("ACE table {} does not have subshell data. Only "
"newer ACE photoatomic libraries are supported "
"(e.g., eprdata14).".format(ace.name))
# Add bremsstrahlung DCS data
data._add_bremsstrahlung()
return data
@classmethod
def from_endf(cls, photoatomic, relaxation=None):
"""Generate incident photon data from an ENDF evaluation
Parameters
----------
photoatomic : str or openmc.data.endf.Evaluation
ENDF photoatomic data evaluation to read from. If given as a string,
it is assumed to be the filename for the ENDF file.
relaxation : str or openmc.data.endf.Evaluation, optional
ENDF atomic relaxation data evaluation to read from. If given as a
string, it is assumed to be the filename for the ENDF file.
Returns
-------
openmc.data.IncidentPhoton
Photon interaction data
"""
if isinstance(photoatomic, Evaluation):
ev = photoatomic
else:
ev = Evaluation(photoatomic)
Z = ev.target['atomic_number']
data = cls(Z)
# Read each reaction
for mf, mt, nc, mod in ev.reaction_list:
if mf == 23:
data.reactions[mt] = PhotonReaction.from_endf(ev, mt)
# Add atomic relaxation data if it hasn't been added already
if relaxation is not None:
data.atomic_relaxation = AtomicRelaxation.from_endf(relaxation)
# If Compton profile data hasn't been loaded, do so
if not _COMPTON_PROFILES:
filename = os.path.join(os.path.dirname(__file__), 'compton_profiles.h5')
with h5py.File(filename, 'r') as f:
_COMPTON_PROFILES['pz'] = f['pz'][()]
for i in range(1, 101):
group = f['{:03}'.format(i)]
num_electrons = group['num_electrons'][()]
binding_energy = group['binding_energy'][()]*EV_PER_MEV
J = group['J'][()]
_COMPTON_PROFILES[i] = {'num_electrons': num_electrons,
'binding_energy': binding_energy,
'J': J}
# Add Compton profile data
pz = _COMPTON_PROFILES['pz']
profile = _COMPTON_PROFILES[Z]
data.compton_profiles['num_electrons'] = profile['num_electrons']
data.compton_profiles['binding_energy'] = profile['binding_energy']
data.compton_profiles['J'] = [Tabulated1D(pz, J_k) for J_k in profile['J']]
# Add bremsstrahlung DCS data
data._add_bremsstrahlung()
return data
@classmethod
def from_hdf5(cls, group_or_filename):
"""Generate photon reaction from an HDF5 group
Parameters
----------
group_or_filename : h5py.Group or str
HDF5 group containing interaction data. If given as a string, it is
assumed to be the filename for the HDF5 file, and the first group is
used to read from.
Returns
-------
openmc.data.IncidentPhoton
Photon interaction data
"""
if isinstance(group_or_filename, h5py.Group):
group = group_or_filename
need_to_close = False
else:
h5file = h5py.File(str(group_or_filename), 'r')
need_to_close = True
# Make sure version matches
if 'version' in h5file.attrs:
major, minor = h5file.attrs['version']
# For now all versions of HDF5 data can be read
else:
raise IOError(
'HDF5 data does not indicate a version. Your installation '
'of the OpenMC Python API expects version {}.x data.'
.format(HDF5_VERSION_MAJOR))
group = list(h5file.values())[0]
Z = group.attrs['Z']
data = cls(Z)
# Read energy grid
energy = group['energy'][()]
# Read cross section data
for mt, (name, key) in _REACTION_NAME.items():
if key in group:
rgroup = group[key]
elif key in group['subshells']:
rgroup = group['subshells'][key]
else:
continue
data.reactions[mt] = PhotonReaction.from_hdf5(rgroup, mt, energy)
# Check for necessary reactions
for mt in (502, 504, 522):
assert mt in data, "Reaction {} not found".format(mt)
# Read atomic relaxation
data.atomic_relaxation = AtomicRelaxation.from_hdf5(group['subshells'])
# Read Compton profiles
if 'compton_profiles' in group:
rgroup = group['compton_profiles']
profile = data.compton_profiles
profile['num_electrons'] = rgroup['num_electrons'][()]
profile['binding_energy'] = rgroup['binding_energy'][()]
# Get electron momentum values
pz = rgroup['pz'][()]
J = rgroup['J'][()]
if pz.size != J.shape[1]:
raise ValueError("'J' array shape is not consistent with the "
"'pz' array shape")
profile['J'] = [Tabulated1D(pz, Jk) for Jk in J]
# Read bremsstrahlung
if 'bremsstrahlung' in group:
rgroup = group['bremsstrahlung']
data.bremsstrahlung['I'] = rgroup.attrs['I']
for key in ('dcs', 'electron_energy', 'ionization_energy',
'num_electrons', 'photon_energy'):
data.bremsstrahlung[key] = rgroup[key][()]
# If HDF5 file was opened here, make sure it gets closed
if need_to_close:
h5file.close()
return data
def export_to_hdf5(self, path, mode='a', libver='earliest'):
"""Export incident photon data to an HDF5 file.
Parameters
----------
path : str
Path to write HDF5 file to
mode : {'r+', 'w', 'x', 'a'}
Mode that is used to open the HDF5 file. This is the second argument
to the :class:`h5py.File` constructor.
libver : {'earliest', 'latest'}
Compatibility mode for the HDF5 file. 'latest' will produce files
that are less backwards compatible but have performance benefits.
"""
with h5py.File(str(path), mode, libver=libver) as f:
# Write filetype and version
f.attrs['filetype'] = np.string_('data_photon')
if 'version' not in f.attrs:
f.attrs['version'] = np.array(HDF5_VERSION)
group = f.create_group(self.name)
group.attrs['Z'] = Z = self.atomic_number
# Determine union energy grid
union_grid = np.array([])
for rx in self:
union_grid = np.union1d(union_grid, rx.xs.x)
group.create_dataset('energy', data=union_grid)
# Write cross sections
shell_group = group.create_group('subshells')
designators = []
for mt, rx in self.reactions.items():
name, key = _REACTION_NAME[mt]
if mt in (502, 504, 515, 517, 522, 525):
sub_group = group.create_group(key)
elif mt >= 534 and mt <= 572:
# Subshell
designators.append(key)
sub_group = shell_group.create_group(key)
# Write atomic relaxation
if self.atomic_relaxation is not None:
if key in self.atomic_relaxation.subshells:
self.atomic_relaxation.to_hdf5(sub_group, key)
else:
continue
rx.to_hdf5(sub_group, union_grid, Z)
shell_group.attrs['designators'] = np.array(designators, dtype='S')
# Write Compton profiles
if self.compton_profiles:
compton_group = group.create_group('compton_profiles')
profile = self.compton_profiles
compton_group.create_dataset('num_electrons',
data=profile['num_electrons'])
compton_group.create_dataset('binding_energy',
data=profile['binding_energy'])
# Get electron momentum values
compton_group.create_dataset('pz', data=profile['J'][0].x)
# Create/write 2D array of profiles
J = np.array([Jk.y for Jk in profile['J']])
compton_group.create_dataset('J', data=J)
# Write bremsstrahlung
if self.bremsstrahlung:
brem_group = group.create_group('bremsstrahlung')
for key, value in self.bremsstrahlung.items():
if key == 'I':
brem_group.attrs[key] = value
else:
brem_group.create_dataset(key, data=value)
def _add_bremsstrahlung(self):
"""Add the data used in the thick-target bremsstrahlung approximation
"""
# Load bremsstrahlung data if it has not yet been loaded
if not _BREMSSTRAHLUNG:
# Add data used for density effect correction
filename = os.path.join(os.path.dirname(__file__), 'density_effect.h5')
with h5py.File(filename, 'r') as f:
for i in range(1, 101):
group = f['{:03}'.format(i)]
_BREMSSTRAHLUNG[i] = {
'I': group.attrs['I'],
'num_electrons': group['num_electrons'][()],
'ionization_energy': group['ionization_energy'][()]
}
filename = os.path.join(os.path.dirname(__file__), 'BREMX.DAT')
with open(filename, 'r') as fh:
brem = fh.read().split()
# Incident electron kinetic energy grid in eV
_BREMSSTRAHLUNG['electron_energy'] = np.logspace(3, 9, 200)
log_energy = np.log(_BREMSSTRAHLUNG['electron_energy'])
# Get number of tabulated electron and photon energy values
n = int(brem[37])
k = int(brem[38])
# Index in data
p = 39
# Get log of incident electron kinetic energy values, used for
# cubic spline interpolation in log energy. Units are in MeV, so
# convert to eV.
logx = np.log(np.fromiter(brem[p:p+n], float, n)*EV_PER_MEV)
p += n
# Get reduced photon energy values
_BREMSSTRAHLUNG['photon_energy'] = np.fromiter(brem[p:p+k], float, k)
p += k
for i in range(1, 101):
dcs = np.empty([len(log_energy), k])
# Get the scaled cross section values for each electron energy
# and reduced photon energy for this Z. Units are in mb, so
# convert to b.
y = np.reshape(np.fromiter(brem[p:p+n*k], float, n*k), (n, k))*1.0e-3
p += k*n
for j in range(k):
# Cubic spline interpolation in log energy and linear DCS
cs = CubicSpline(logx, y[:, j])
# Get scaled DCS values (barns) on new energy grid
dcs[:, j] = cs(log_energy)
_BREMSSTRAHLUNG[i]['dcs'] = dcs
# Add bremsstrahlung DCS data
self.bremsstrahlung['electron_energy'] = _BREMSSTRAHLUNG['electron_energy']
self.bremsstrahlung['photon_energy'] = _BREMSSTRAHLUNG['photon_energy']
self.bremsstrahlung.update(_BREMSSTRAHLUNG[self.atomic_number])
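# Typical end-to-end usage of IncidentPhoton (a sketch; the ENDF file
# paths are caller-supplied and hypothetical, not shipped with this
# module):
def _example_incident_photon(photoatomic_path, relaxation_path, out_path):
    data = IncidentPhoton.from_endf(photoatomic_path,
                                    relaxation=relaxation_path)
    coherent = data[502]  # PhotonReaction for coherent scattering
    data.export_to_hdf5(out_path, mode='w')
    return coherent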
class PhotonReaction(EqualityMixin):
"""Photon-induced reaction
Parameters
----------
mt : int
The ENDF MT number for this reaction.
Attributes
----------
anomalous_real : openmc.data.Tabulated1D
Real part of the anomalous scattering factor
anomalous_imag : openmc.data.Tabulated1D
Imaginary part of the anomalous scattering factor
mt : int
The ENDF MT number for this reaction.
scattering_factor : openmc.data.Tabulated1D
Coherent or incoherent form factor.
xs : Callable
Cross section as a function of incident photon energy
"""
def __init__(self, mt):
self.mt = mt
self._xs = None
self._scattering_factor = None
self._anomalous_real = None
self._anomalous_imag = None
def __repr__(self):
if self.mt in _REACTION_NAME:
return "<Photon Reaction: MT={} {}>".format(
self.mt, _REACTION_NAME[self.mt][0])
else:
return "<Photon Reaction: MT={}>".format(self.mt)
@property
def anomalous_real(self):
return self._anomalous_real
@property
def anomalous_imag(self):
return self._anomalous_imag
@property
def scattering_factor(self):
return self._scattering_factor
@property
def xs(self):
return self._xs
@anomalous_real.setter
def anomalous_real(self, anomalous_real):
cv.check_type('real part of anomalous scattering factor',
anomalous_real, Callable)
self._anomalous_real = anomalous_real
@anomalous_imag.setter
def anomalous_imag(self, anomalous_imag):
cv.check_type('imaginary part of anomalous scattering factor',
anomalous_imag, Callable)
self._anomalous_imag = anomalous_imag
@scattering_factor.setter
def scattering_factor(self, scattering_factor):
cv.check_type('scattering factor', scattering_factor, Callable)
self._scattering_factor = scattering_factor
@xs.setter
def xs(self, xs):
cv.check_type('reaction cross section', xs, Callable)
self._xs = xs
@classmethod
def from_ace(cls, ace, mt):
"""Generate photon reaction from an ACE table
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
mt : int
The MT value of the reaction to get data for
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
# Create instance
rx = cls(mt)
# Get energy grid (stored as logarithms)
n = ace.nxs[3]
idx = ace.jxs[1]
energy = np.exp(ace.xss[idx : idx+n])*EV_PER_MEV
# Get index for appropriate reaction
if mt == 502:
# Coherent scattering
idx = ace.jxs[1] + 2*n
elif mt == 504:
# Incoherent scattering
idx = ace.jxs[1] + n
elif mt == 515:
# Pair production
idx = ace.jxs[1] + 4*n
elif mt == 522:
# Photoelectric
idx = ace.jxs[1] + 3*n
elif mt == 525:
# Heating
idx = ace.jxs[5]
else:
raise ValueError('ACE photoatomic cross sections do not have '
'data for MT={}.'.format(mt))
# Store cross section
xs = ace.xss[idx : idx+n].copy()
if mt == 525:
# Get heating factors in [eV per collision]
xs *= EV_PER_MEV
else:
nonzero = (xs != 0.0)
xs[nonzero] = np.exp(xs[nonzero])
rx.xs = Tabulated1D(energy, xs, [n], [5])
# Get form factors for incoherent/coherent scattering
new_format = (ace.nxs[6] > 0)
if mt == 502:
idx = ace.jxs[3]
if new_format:
n = (ace.jxs[4] - ace.jxs[3]) // 3
x = ace.xss[idx : idx+n]
idx += n
else:
x = np.array([
0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.08, 0.1, 0.12,
0.15, 0.18, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55,
0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6,
1.7, 1.8, 1.9, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4,
3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0, 5.2, 5.4, 5.6,
5.8, 6.0])
n = x.size
ff = ace.xss[idx+n : idx+2*n]
rx.scattering_factor = Tabulated1D(x, ff)
elif mt == 504:
idx = ace.jxs[2]
if new_format:
n = (ace.jxs[3] - ace.jxs[2]) // 2
x = ace.xss[idx : idx+n]
idx += n
else:
x = np.array([
0.0, 0.005, 0.01, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6,
0.7, 0.8, 0.9, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 8.0
])
n = x.size
ff = ace.xss[idx : idx+n]
rx.scattering_factor = Tabulated1D(x, ff)
return rx
@classmethod
def from_endf(cls, ev, mt):
"""Generate photon reaction from an ENDF evaluation
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF photo-atomic interaction data evaluation
mt : int
The MT value of the reaction to get data for
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
rx = cls(mt)
# Read photon cross section
if (23, mt) in ev.section:
file_obj = StringIO(ev.section[23, mt])
get_head_record(file_obj)
params, rx.xs = get_tab1_record(file_obj)
# Set subshell binding energy and/or fluorescence yield
if mt >= 534 and mt <= 599:
rx.subshell_binding_energy = params[0]
if mt >= 534 and mt <= 572:
rx.fluorescence_yield = params[1]
# Read form factors / scattering functions
if (27, mt) in ev.section:
file_obj = StringIO(ev.section[27, mt])
get_head_record(file_obj)
params, rx.scattering_factor = get_tab1_record(file_obj)
# Check for anomalous scattering factor
if mt == 502:
if (27, 506) in ev.section:
file_obj = StringIO(ev.section[27, 506])
get_head_record(file_obj)
params, rx.anomalous_real = get_tab1_record(file_obj)
if (27, 505) in ev.section:
file_obj = StringIO(ev.section[27, 505])
get_head_record(file_obj)
params, rx.anomalous_imag = get_tab1_record(file_obj)
return rx
@classmethod
def from_hdf5(cls, group, mt, energy):
"""Generate photon reaction from an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to read from
mt : int
The MT value of the reaction to get data for
energy : Iterable of float
Array of energies at which the cross sections are tabulated
Returns
-------
openmc.data.PhotonReaction
Photon reaction data
"""
# Create instance
rx = cls(mt)
# Cross sections
xs = group['xs'][()]
# Replace zero elements with small non-zero values to enable log-log interpolation
xs[xs == 0.0] = np.exp(-500.0)
# Threshold
threshold_idx = 0
if 'threshold_idx' in group['xs'].attrs:
threshold_idx = group['xs'].attrs['threshold_idx']
# Store cross section
rx.xs = Tabulated1D(energy[threshold_idx:], xs, [len(xs)], [5])
# Check for anomalous scattering factor
if 'anomalous_real' in group:
rx.anomalous_real = Tabulated1D.from_hdf5(group['anomalous_real'])
if 'anomalous_imag' in group:
rx.anomalous_imag = Tabulated1D.from_hdf5(group['anomalous_imag'])
# Check for factors / scattering functions
if 'scattering_factor' in group:
rx.scattering_factor = Tabulated1D.from_hdf5(group['scattering_factor'])
return rx
def to_hdf5(self, group, energy, Z):
"""Write photon reaction to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
energy : Iterable of float
Array of energies at which the cross sections are tabulated
Z : int
atomic number
"""
# Write cross sections
if self.mt >= 534 and self.mt <= 572:
# Determine threshold
threshold = self.xs.x[0]
idx = np.searchsorted(energy, threshold, side='right') - 1
# Interpolate cross section onto union grid and write
photoionization = self.xs(energy[idx:])
group.create_dataset('xs', data=photoionization)
assert len(energy) == len(photoionization) + idx
group['xs'].attrs['threshold_idx'] = idx
else:
group.create_dataset('xs', data=self.xs(energy))
# Write scattering factor
if self.scattering_factor is not None:
if self.mt == 502:
# Create integrated form factor
ff = deepcopy(self.scattering_factor)
ff.x *= ff.x
ff.y *= ff.y/Z**2
int_ff = Tabulated1D(ff.x, ff.integral())
int_ff.to_hdf5(group, 'integrated_scattering_factor')
self.scattering_factor.to_hdf5(group, 'scattering_factor')
if self.anomalous_real is not None:
self.anomalous_real.to_hdf5(group, 'anomalous_real')
if self.anomalous_imag is not None:
self.anomalous_imag.to_hdf5(group, 'anomalous_imag')
| mit |
Leguark/pynoddy | pynoddy/Copy of history.py | 3 | 46835 | '''Noddy history file wrapper
Created on 24/03/2014
@author: Florian Wellmann
'''
import time # for header in model generation
import numpy as np
# import numpy as np
# import matplotlib.pyplot as plt
import events
class NoddyHistory():
"""Class container for Noddy history files"""
def __init__(self, history=None, **kwds):
"""Methods to analyse and change Noddy history files
**Arguments**:
- *history* = string : Name of Noddy history file
**Optional Keywords**:
- *url* = url : link to history file on web (e.g. to download
and open directly from Atlas of Structural Geophysics,
http://virtualexplorer.com.au/special/noddyatlas/index.html)
Note: if both a (local) history is given and a URL, the local
file is opened!
"""
if history is None:
if "url" in kwds:
self.load_history_from_url(kwds['url'])
self.determine_events()
else:
# generate a new history
self.create_new_history()
else:
# load existing history
self.load_history(history)
self.determine_events()
def info(self, **kwds):
"""Print out model information
**Optional keywords**:
- *events_only* = bool : only information on events
"""
events_only = kwds.get("events_only", False)
if not events_only:
# First: check if all information available
if not hasattr(self, 'extent_x'): self.get_extent()
if not hasattr(self, 'origin_x'): self.get_origin()
if not hasattr(self, 'cube_size'): self.get_cube_size()
if not hasattr(self, 'filename'): self.get_filename()
if not hasattr(self, 'date_saved'): self.get_date_saved()
print(60 * "*" + "\n\t\t\tModel Information\n" + 60 * "*")
print("\n")
if self.n_events == 0:
print("The model does not yet contain any events\n")
else:
print("This model consists of %d events:" % self.n_events)
for k,ev in self.events.items():
print("\t(%d) - %s" % (k,ev.event_type))
if not events_only:
print("The model extent is:")
print("\tx - %.1f m" % self.extent_x)
print("\ty - %.1f m" % self.extent_y)
print("\tz - %.1f m" % self.extent_z)
print("Number of cells in each direction:")
print("\tnx = %d" % (self.extent_x / self.cube_size))
print("\tny = %d" % (self.extent_y / self.cube_size))
print("\tnz = %d" % (self.extent_z / self.cube_size))
print("The model origin is located at: \n\t(%.1f, %.1f, %.1f)" % (self.origin_x,
self.origin_y,
self.origin_z))
print("The cubesize for model export is: \n\t%d m" % self.cube_size)
# and now some metadata
print("\n")
print(60 * "*" + "\n\t\t\tMeta Data\n" + 60 * "*")
print("\n")
print("The filename of the model is:\n\t%s" % self.filename)
print("It was last saved (if origin was a history file!) at:\n\t%s\n" % self.date_saved)
def get_origin(self):
"""Get coordinates of model origin and return and store in local variables
**Returns**: (origin_x, origin_y, origin_z)
"""
for i,line in enumerate(self.history_lines):
if "Origin X" in line:
self.origin_x = float(self.history_lines[i].split("=")[1])
self.origin_y = float(self.history_lines[i+1].split("=")[1])
self.origin_z = float(self.history_lines[i+2].split("=")[1])
break
return(self.origin_x, self.origin_y, self.origin_z)
def set_origin(self, origin_x, origin_y, origin_z):
"""Set coordinates of model origin and update local variables
**Arguments**:
- *origin_x* = float : x-location of model origin
- *origin_y* = float : y-location of model origin
- *origin_z* = float : z-location of model origin
"""
self.origin_x = origin_x
self.origin_y = origin_y
self.origin_z = origin_z
origin_x_line = " Origin X = %.2f\n" % origin_x
origin_y_line = " Origin Y = %.2f\n" % origin_y
origin_z_line = " Origin Z = %.2f\n" % origin_z
for i,line in enumerate(self.history_lines):
if "Origin X" in line:
self.history_lines[i] = origin_x_line
self.history_lines[i+1] = origin_y_line
self.history_lines[i+2] = origin_z_line
break
def get_extent(self):
"""Get model extent and return and store in local variables
**Returns**: (extent_x, extent_y, extent_z)
"""
for i,line in enumerate(self.history_lines):
if "Length X" in line:
self.extent_x = float(self.history_lines[i].split("=")[1])
self.extent_y = float(self.history_lines[i+1].split("=")[1])
self.extent_z = float(self.history_lines[i+2].split("=")[1])
break
return(self.extent_x, self.extent_y, self.extent_z)
def set_extent(self, extent_x, extent_y, extent_z):
"""Set model extent and update local variables
**Arguments**:
- *extent_x* = float : extent in x-direction
- *extent_y* = float : extent in y-direction
- *extent_z* = float : extent in z-direction
"""
self.extent_x = extent_x
self.extent_y = extent_y
self.extent_z = extent_z
extent_x_line = " Length X = %.2f\n" % extent_x
extent_y_line = " Length Y = %.2f\n" % extent_y
extent_z_line = " Length Z = %.2f\n" % extent_z
for i,line in enumerate(self.history_lines):
if "Length X" in line:
self.history_lines[i] = extent_x_line
self.history_lines[i+1] = extent_y_line
self.history_lines[i+2] = extent_z_line
break
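# Minimal usage sketch for the getters/setters above ("example.his" is a
# hypothetical file name):
#
#   his = NoddyHistory("example.his")
#   print(his.get_origin())              # e.g. (0.0, 0.0, 5000.0)
#   print(his.get_extent())              # e.g. (10000.0, 7000.0, 5000.0)
#   his.set_origin(0., 0., 4000.)
#   his.set_extent(8000., 7000., 4000.)
#   his.write_history("example_resized.his")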
def get_drillhole_data(self, x, y, **kwds):
"""Get geology values along 1-D profile at position x,y with a 1 m resolution
The following steps are performed:
1. creates a copy of the entire object,
2. sets values of origin, extent and geology cube size,
3. saves model to a temporary file,
4. runs Noddy on that file
5. opens and analyses output
6. deletes temporary files
Note: this method only works if write access to current directory
is enabled and noddy can be executed!
**Arguments**:
- *x* = float: x-position of drillhole
- *y* = float: y-position of drillhole
**Optional Arguments**:
- *z_min* = float : minimum depth of drillhole (default: model range)
- *z_max* = float : maximum depth of drillhole (default: model range)
- *resolution* = float : resolution along profile (default: 1 m)
"""
# resolve keywords
resolution = kwds.get("resolution", 1)
self.get_extent()
self.get_origin()
z_min = kwds.get("z_min", self.origin_z)
z_max = kwds.get("z_max", self.extent_z)
# 1. create copy
import copy
tmp_his = copy.deepcopy(self)
# 2. set values
tmp_his.set_origin(x, y, z_min)
tmp_his.set_extent(resolution, resolution, z_max)
tmp_his.change_cube_size(resolution)
# 3. save temporary file
tmp_his_file = "tmp_1D_drillhole.his"
tmp_his.write_history(tmp_his_file)
tmp_out_file = "tmp_1d_out"
# 4. run noddy
import pynoddy
import pynoddy.output
pynoddy.compute_model(tmp_his_file, tmp_out_file)
# 5. open output
tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
# 6.
return tmp_out.block[0,0,:]
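# Example (sketch): virtual drillhole at x=1000, y=2000 with 5 m resolution.
# Requires a working Noddy executable and write access to the current
# directory (see note in the docstring above):
#
#   his = NoddyHistory("example.his")   # hypothetical file
#   log = his.get_drillhole_data(1000., 2000., resolution=5)
#   # log is a 1-D numpy array of geology unit ids along depth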
def load_history(self, history):
"""Load Noddy history
**Arguments**:
- *history* = string : Name of Noddy history file
"""
with open(history, 'r') as history_file:
self.history_lines = history_file.readlines()
def _get_footer_lines(self):
"""Get the footer lines from self.history_lines
The footer contains everything below events (all settings, etc.)"""
# get id of footer from history lines
for i,line in enumerate(self.history_lines):
if "#BlockOptions" in line:
break
# everything from the "#BlockOptions" marker onwards is the footer
return self.history_lines[i:]
def load_history_from_url(self, url):
"""Directly load a Noddy history from a URL
This method is useful to load a model from the Structural Geophysics
Atlas on the pages of the Virtual Explorer.
See: http://virtualexplorer.com.au/special/noddyatlas/index.html
**Arguments**:
- *url* : url of history file
"""
import urllib2
response = urllib2.urlopen(url)
tmp_lines = response.read().split("\n")
self.history_lines = []
for line in tmp_lines:
# append EOL again for consistency
self.history_lines.append(line + "\n")
def determine_model_stratigraphy(self):
"""Determine stratigraphy of entire model from all events"""
self.model_stratigraphy = []
for e in sorted(self.events.keys()):
if self.events[e].event_type == 'STRATIGRAPHY':
self.model_stratigraphy += self.events[e].layer_names
def determine_events(self):
"""Determine events and save line numbers
.. note:: Parsing of the history file is based on a fixed Noddy output order.
If this is, for some reason (e.g. in a changed version of Noddy) not the case, then
this parsing might fail!
"""
self._raw_events = []
for i,line in enumerate(self.history_lines):
if "No of Events" in line:
self.n_events = int(line.split("=")[1])
elif "Event #" in line:
event = {}
event['type'] = line.split('=')[1].rstrip()
event['num'] = int(line[7:9])
event['line_start'] = i
self._raw_events.append(event)
# finally: if the definition for BlockOptions starts, the event definition is over
elif "BlockOptions" in line:
last_event_stop = i-2
# now: find the line ends for the single event blocks
for i,event in enumerate(self._raw_events[1:]):
self._raw_events[i]['line_end'] = event['line_start']-1
# now adjust for last event
self._raw_events[-1]['line_end'] = last_event_stop
self.events = {} # idea: create events as dictionary so that it is easier
# to swap order later!
# now create proper event objects for these events
for e in self._raw_events:
event_lines = self.history_lines[e['line_start']:e['line_end']+1]
print(e['type'])
if 'FAULT' in e['type']:
ev = events.Fault(lines = event_lines)
# set specific aspects first
elif 'FOLD' in e['type']:
ev = events.Fold(lines = event_lines)
elif 'UNCONFORMITY' in e['type']:
ev = events.Unconformity(lines = event_lines)
elif 'STRATIGRAPHY' in e['type']:
ev = events.Stratigraphy(lines = event_lines)
elif 'TILT' in e['type']: # AK
ev = events.Tilt(lines = event_lines)
else: continue
# now set shared attributes (those defined in superclass Event)
order = e['num']
self.events[order] = ev
# determine overall begin and end of the history events
self.all_events_begin = self._raw_events[0]['line_start']
self.all_events_end = self._raw_events[-1]['line_end']
def get_cube_size(self):
"""Determine cube size for model export"""
for line in self.history_lines:
if 'Geophysics Cube Size' in line:
self.cube_size = float(line.split('=')[1].rstrip())
def get_filename(self):
"""Determine model filename from history file/ header"""
self.filename = self.history_lines[0].split('=')[1].rstrip()
def get_date_saved(self):
"""Determine the last savepoint of the file"""
self.date_saved = self.history_lines[1].split('=')[1].rstrip()
def change_cube_size(self, cube_size):
"""Change the model cube size (isotropic)
**Arguments**:
- *cube_size* = float : new model cube size
"""
# create local copy of history
lines_new = self.history_lines[:]
for i,line in enumerate(self.history_lines):
if 'Geophysics Cube Size' in line:
l = line.split('=')
l_new = '%7.2f\r\n' % cube_size
line_new = l[0] + "=" + l_new
lines_new[i] = line_new
# assign changed lines back to object
self.history_lines = lines_new[:]
def write_history_bak(self, filename):
"""Write history to new file
**Arguments**:
- *filename* = string : filename of new history file
.. hint:: Just love it how easy it is to 'write history' with Noddy ;-)
"""
# before saving: update all event properties (in case changes were made)
self.update_all_event_properties()
# First step: update history lines with events
all_event_lines = []
for event_id in sorted(self.events.keys()):
for line in self.events[event_id].event_lines:
all_event_lines.append(line)
# now substitute old with new lines:
self.history_lines[self.all_events_begin:self.all_events_end+1] = all_event_lines
f = open(filename, 'w')
for line in self.history_lines:
f.write(line)
f.close()
def swap_events(self, event_num_1, event_num_2):
"""Swap two geological events in the timeline
**Arguments**:
- *event_num_1/2* = int : number of events to be swapped ("order")
"""
# events have to be copied, otherwise only a reference is passed!
event_tmp = self.events[event_num_1]
self.events[event_num_1] = self.events[event_num_2]
self.events[event_num_2] = event_tmp
self.update_event_numbers()
def reorder_events(self, reorder_dict):
"""Reorder events accoring to assignment in reorder_dict
**Arguments**:
- *reorder_dict* = dict : for example {1 : 2, 2 : 3, 3 : 1}
"""
tmp_events = self.events.copy()
for key, value in reorder_dict.items():
tmp_events[value] = self.events[key]
self.events = tmp_events.copy()
self.update_event_numbers()
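# Example (sketch): cycle the first three events (1 -> position 2,
# 2 -> position 3, 3 -> position 1); events not listed in the dict keep
# their original position:
#
#   his.reorder_events({1: 2, 2: 3, 3: 1})
#   his.write_history("reordered.his")   # hypothetical output name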
def update_event_numbers(self):
"""Update event numbers in 'Event #' line in noddy history file"""
for key, event in self.events.items():
event.set_event_number(key)
def update_all_event_properties(self):
"""Update properties of all events - in case changes were made"""
for event in self.events.values():
event.update_properties()
#
#class NewHistory():
# """Methods to create a Noddy model"""
#
def create_new_history(self):
"""Methods to create a Noddy model
"""
# set event counter
self.event_counter = 0
self.all_events_begin = 7 # default after header
self.all_events_end = 7
# initialise history lines
self.history_lines = []
self.events = {}
def get_ev_counter(self):
"""Event counter for implicit and continuous definition of events"""
self.event_counter += 1
return self.event_counter
def add_event(self, event_type, event_options, **kwds):
"""Add an event type to history
**Arguments**:
- *event_type* = string : type of event, legal options to date are:
'stratigraphy', 'fault', 'fold', 'unconformity'
- *event_options* = dict : required options to create the event (event dependent)
**Optional keywords**:
- *event_num* = int : event number (default: implicitly defined with increasing counter)
"""
event_num = kwds.get("event_num", self.get_ev_counter())
if event_type == 'stratigraphy':
ev = self._create_stratigraphy(event_options)
ev.event_type = 'STRATIGRAPHY'
elif event_type == 'fault':
ev = self._create_fault(event_options)
ev.event_type = 'FAULT'
elif event_type == 'tilt': # AK
ev = self._create_tilt(event_options)
ev.event_type = 'TILT'
elif event_type == 'unconformity': # AK
ev = self._create_unconformity(event_options)
ev.event_type = 'UNCONFORMITY'
else:
raise NameError('Event type %s not (yet) implemented' % event_type)
ev.set_event_number(event_num)
self.events[event_num] = ev
# update beginning and ending of events in history
self.all_events_end = self.all_events_end + len(ev.event_lines)
# add event to history lines, as well (for consistency with other methods)
self.history_lines = self.history_lines[:self.all_events_begin] + \
ev.event_lines + \
self.history_lines[self.all_events_end:]
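# Example (sketch): build a small model from scratch; the option keys
# follow the _create_* helpers below and the values are illustrative only:
#
#   his = NoddyHistory()   # no file given -> create_new_history() is called
#   his.add_event('stratigraphy', {'num_layers': 3,
#                                  'layer_names': ['A', 'B', 'C'],
#                                  'layer_thickness': [1000, 1500, 2000]})
#   his.add_event('fault', {'name': 'F1', 'pos': (4000, 0, 5000),
#                           'dip_dir': 90, 'dip': 60, 'slip': 500})
#   his.write_history("new_model.his")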
def _create_header(self):
"""Create model header, include actual date"""
t = time.localtime() # get current time
time_string = "%d/%d/%d %d:%d:%d" % (t.tm_mday,
t.tm_mon,
t.tm_year,
t.tm_hour,
t.tm_min,
t.tm_sec)
self.header_lines = """#Filename = """ + self.filename + """
#Date Saved = """ + time_string + """
FileType = 111
Version = 7.11
"""
def _create_stratigraphy(self, event_options):
"""Create a stratigraphy event
**Arguments**:
- *event_options* = dict : required and optional settings for the event
Options are:
'num_layers' = int : number of layers (required)
'layer_names' = list of strings : names for layers (default names otherwise)
'layer_thickness' = list of floats : thicknesses for all layers
"""
ev = events.Stratigraphy()
tmp_lines = [""]
tmp_lines.append("\tNum Layers\t= %d" % event_options['num_layers'])
for i in range(event_options['num_layers']):
"""Add stratigraphy layers"""
layer_name = event_options['layer_names'][i]
cum_thickness = np.cumsum(event_options['layer_thickness'])
layer_lines = _Templates().strati_layer
# now replace required variables
layer_lines = layer_lines.replace("$NAME$", layer_name)
layer_lines = layer_lines.replace("$HEIGHT$", "%.1f" % cum_thickness[i])
layer_lines = layer_lines.replace(" ", "\t")
# split lines and add to event lines list:
for layer_line in layer_lines.split("\n"):
tmp_lines.append(layer_line)
# append event name
tmp_lines.append("""\tName\t= Strat
""")
# event lines are defined in list:
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
ev.num_layers = event_options['num_layers']
return ev
def _create_fault(self, event_options):
"""Create a fault event
**Arguments**:
- *event_options* = dict : required and optional settings for the event;
Options are:
'name' = string : name of fault event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'dip_dir' = [0,360] : dip direction of fault
'dip' = [0,90] : dip angle of fault
'slip' = float : slip along fault
"""
ev = events.Fault()
tmp_lines = [""]
fault_lines = _Templates.fault
# substitute text with according values
fault_lines = fault_lines.replace("$NAME$", event_options['name'])
fault_lines = fault_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
fault_lines = fault_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % z)
else:
fault_lines = fault_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
fault_lines = fault_lines.replace("$DIP_DIR$", "%.1f" % event_options['dip_dir'])
fault_lines = fault_lines.replace("$DIP$", "%.1f" % event_options['dip'])
fault_lines = fault_lines.replace("$SLIP$", "%.1f" % event_options['slip'])
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for layer_line in fault_lines.split("\n"):
tmp_lines.append(layer_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
# AK 2014-10
def _create_tilt(self, event_options):
"""Create a tilt event
**Arguments**:
- *event_options* = dict : required and optional settings for the event;
Options are:
'name' = string : name of tilt event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'rotation' = [0,360] : rotation angle of the tilt (degrees)
'plunge_direction' = [0,360] : direction of the rotation-axis plunge, measured from the x axis
'plunge' = float : plunge angle of the rotation axis
"""
ev = events.Tilt()
tmp_lines = [""]
tilt_lines = _Templates.tilt
# substitute text with according values
tilt_lines = tilt_lines.replace("$NAME$", event_options['name'])
tilt_lines = tilt_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
tilt_lines = tilt_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
tilt_lines = tilt_lines.replace("$POS_Z$", "%.1f" % z)
else:
tilt_lines = tilt_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
tilt_lines = tilt_lines.replace("$ROTATION$", "%.1f" % event_options['rotation'])
tilt_lines = tilt_lines.replace("$PLUNGE_DIRECTION$", "%.1f" % event_options['plunge_direction'])
tilt_lines = tilt_lines.replace("$PLUNGE$", "%.1f" % event_options['plunge'])
# now split lines and add as list entries to event lines
# event lines are defined in list:
# split lines and add to event lines list:
for tilt_line in tilt_lines.split("\n"):
tmp_lines.append(tilt_line)
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
# AK 2014-10
def _create_unconformity(self, event_options):
"""Create a unconformity event
**Arguments**:
- *event_options* = dict : required and optional settings for the event;
Options are:
'name' = string : name of unconformity event
'pos' = (x,y,z) : position of reference point (floats)
.. note:: for convenience, it is possible to assign 'top' to z
for position at "surface"
'dip_direction' = [0,360] : dip direction of the unconformity surface
'dip' = [0,90] : dip angle of the unconformity surface
'num_layers', 'layer_names', 'layer_thickness' : stratigraphy
options for the units above the unconformity (as in _create_stratigraphy)
"""
ev = events.Unconformity()
tmp_lines = [""]
unconformity_lines = _Templates.unconformity
# substitute text with according values
unconformity_lines = unconformity_lines.replace("$NAME$", event_options['name'])
unconformity_lines = unconformity_lines.replace("$POS_X$", "%.1f" % event_options['pos'][0])
unconformity_lines = unconformity_lines.replace("$POS_Y$", "%.1f" % event_options['pos'][1])
if event_options['pos'][2] == 'top':
# recalculate z-value to be at top of model
z = self.zmax
unconformity_lines = unconformity_lines.replace("$POS_Z$", "%.1f" % z)
else:
unconformity_lines = unconformity_lines.replace("$POS_Z$", "%.1f" % event_options['pos'][2])
unconformity_lines = unconformity_lines.replace("$DIP_DIRECTION$", "%.1f" % event_options['dip_direction'])
unconformity_lines = unconformity_lines.replace("$DIP$", "%.1f" % event_options['dip'])
# split lines and add to event lines list:
for unconformity_line in unconformity_lines.split("\n"):
tmp_lines.append(unconformity_line)
# unconformity has a stratigraphy block
tmp_lines.append("\tNum Layers\t= %d" % event_options['num_layers'])
for i in range(event_options['num_layers']):
"""Add stratigraphy layers"""
layer_name = event_options['layer_names'][i]
cum_thickness = np.cumsum(event_options['layer_thickness'])
layer_lines = _Templates().strati_layer
# now replace required variables
layer_lines = layer_lines.replace("$NAME$", layer_name)
layer_lines = layer_lines.replace("$HEIGHT$", "%.1f" % cum_thickness[i])
layer_lines = layer_lines.replace(" ", "\t")
# split lines and add to event lines list:
for layer_line in layer_lines.split("\n"):
tmp_lines.append(layer_line)
# append event name
tmp_lines.append("""\tName\t= Strat""")
tmp_lines_list = []
for line in tmp_lines:
tmp_lines_list.append(line + "\n")
ev.set_event_lines(tmp_lines_list)
return ev
def change_event_params(self, changes_dict):
"""Change multiple event parameters according to settings in changes_dict
**Arguments**:
- *changes_dict* = dictionary : entries define relative changes for (multiple) parameters
Per default, the values in the dictionary are added to the event parameters.
"""
# print changes_dict
for key,sub_dict in changes_dict.items():
for sub_key, val in sub_dict.items():
self.events[key].properties[sub_key] += val
def write_history(self, filename):
"""Write history to new file
**Arguments**:
- *filename* = string : filename of new history file
.. hint:: Just love it how easy it is to 'write history' with Noddy ;-)
"""
# first: create header
self.filename = filename
self._create_header()
# initialise history lines
history_lines = []
# add header
for line in self.header_lines.split("\n"):
history_lines.append(line + "\n")
# add number of events
history_lines.append("No of Events\t= %d\n" % len(self.events))
# add events
for event_id in sorted(self.events.keys()):
for line in self.events[event_id].event_lines:
history_lines.append(line)
# add footer
for line in _Templates().footer.split("\n"):
line = line.replace(" ", "\t")
history_lines.append(line + "\n")
f = open(filename, 'w')
for line in history_lines:
f.write(line)
f.close()
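# Example (sketch): round-trip an edited model; change_event_params applies
# *relative* changes, so {'Dip': 10} adds 10 degrees to the current dip:
#
#   his = NoddyHistory("example.his")    # hypothetical file
#   his.change_event_params({2: {'Dip': 10}})
#   his.write_history("example_steeper.his")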
# print history_lines
#
# # First step: update history lines with events
# all_event_lines = []
# for event_id in sorted(self.events.keys()):
# for line in self.events[event_id].event_lines:
# all_event_lines.append(line)
# # now substitute old with new lines:
# self.history_lines[self.all_events_begin:self.all_events_end+1] = all_event_lines
#
#
# f = open(filename, 'w')
# for line in self.history_lines:
# f.write(line)
# f.close()
#
#===============================================================================
# Templates for Noddy history file
#===============================================================================
class _Templates():
header = """#Filename = simple_two_faults.his
#Date Saved = 24/3/2014 14:21:0
FileType = 111
Version = 7.11"""
strati_layer = """ Unit Name = $NAME$
Height = $HEIGHT$
Apply Alterations = ON
Density = 4.00e+000
Anisotropic Field = 0
MagSusX = 1.60e-003
MagSusY = 1.60e-003
MagSusZ = 1.60e-003
MagSus Dip = 9.00e+001
MagSus DipDir = 9.00e+001
MagSus Pitch = 0.00e+000
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-003
Color Name = Color 92
Red = 0
Green = 153
Blue = 48 """
fault = """ Geometry = Translation
Movement = Hanging Wall
X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIR$
Dip = $DIP$
Pitch = 90.00
Slip = $SLIP$
Rotation = 30.00
Amplitude = 2000.00
Radius = 1000.00
XAxis = 2000.00
YAxis = 2000.00
ZAxis = 2000.00
Cyl Index = 0.00
Profile Pitch = 90.00
Color Name = Custom Colour 8
Red = 0
Green = 0
Blue = 254
Fourier Series
Term A 0 = 0.00
Term B 0 = 0.00
Term A 1 = 0.00
Term B 1 = 1.00
Term A 2 = 0.00
Term B 2 = 0.00
Term A 3 = 0.00
Term B 3 = 0.00
Term A 4 = 0.00
Term B 4 = 0.00
Term A 5 = 0.00
Term B 5 = 0.00
Term A 6 = 0.00
Term B 6 = 0.00
Term A 7 = 0.00
Term B 7 = 0.00
Term A 8 = 0.00
Term B 8 = 0.00
Term A 9 = 0.00
Term B 9 = 0.00
Term A 10 = 0.00
Term B 10 = 0.00
Name = Fault Plane
Type = 1
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 6.280000
Min Y Scale = -1.000000
Max Y Scale = 1.000000
Scale Origin = 0.000000
Min Y Replace = -1.000000
Max Y Replace = 1.000000
Num Points = 21
Point X = 0
Point Y = 0
Point X = 31
Point Y = 30
Point X = 62
Point Y = 58
Point X = 94
Point Y = 80
Point X = 125
Point Y = 94
Point X = 157
Point Y = 99
Point X = 188
Point Y = 95
Point X = 219
Point Y = 81
Point X = 251
Point Y = 58
Point X = 282
Point Y = 31
Point X = 314
Point Y = 0
Point X = 345
Point Y = -31
Point X = 376
Point Y = -59
Point X = 408
Point Y = -81
Point X = 439
Point Y = -95
Point X = 471
Point Y = -100
Point X = 502
Point Y = -96
Point X = 533
Point Y = -82
Point X = 565
Point Y = -59
Point X = 596
Point Y = -32
Point X = 628
Point Y = -1
Alteration Type = NONE
Num Profiles = 12
Name = Density
Type = 2
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 4.000000
Scale Origin = 1.000000
Min Y Replace = 0.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = -50
Point X = 628
Point Y = -50
Name = Anisotropy
Type = 3
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - X Axis (Sus)
Type = 4
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Y Axis (Sus)
Type = 5
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Z Axis (Sus)
Type = 6
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = 2.000000
Max Y Replace = 8.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Dip (Sus)
Type = 7
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -180.000000
Max Y Scale = 180.000000
Scale Origin = 1.000000
Min Y Replace = -180.000000
Max Y Replace = 180.000000
Num Points = 2
Point X = 0
Point Y = 1
Point X = 628
Point Y = 1
Name = - Dip Dir (Sus)
Type = 8
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Pitch (Sus)
Type = 9
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = Remanence
Type = 10
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -10.000000
Max Y Scale = 10.000000
Scale Origin = 0.000000
Min Y Replace = -10.000000
Max Y Replace = 10.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Declination (Rem)
Type = 11
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Inclination (Rem)
Type = 12
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -360.000000
Max Y Scale = 360.000000
Scale Origin = 1.000000
Min Y Replace = -360.000000
Max Y Replace = 360.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Name = - Intensity (Rem)
Type = 13
Join Type = LINES
Graph Length = 200.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = -5.000000
Max Y Scale = 5.000000
Scale Origin = 0.000000
Min Y Replace = -5.000000
Max Y Replace = 5.000000
Num Points = 2
Point X = 0
Point Y = 0
Point X = 628
Point Y = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = \\psf\Home
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000
Name = $NAME$"""
# AK 2014-10
tilt = """X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Rotation = $ROTATION$
Plunge Direction = $PLUNGE_DIRECTION$
Plunge = $PLUNGE$
Name = $NAME$"""
unconformity = """X = $POS_X$
Y = $POS_Y$
Z = $POS_Z$
Dip Direction = $DIP_DIRECTION$
Dip = $DIP$
Alteration Type = NONE
Num Profiles = 1
Name =
Type = 0
Join Type = LINES
Graph Length = 0.000000
Min X = 0.000000
Max X = 0.000000
Min Y Scale = 0.000000
Max Y Scale = 0.000000
Scale Origin = 0.000000
Min Y Replace = 0.000000
Max Y Replace = 0.000000
Num Points = 0
Surface Type = FLAT_SURFACE
Surface Filename =
Surface Directory = /tmp_mnt/sci6/users/mark/Atlas/case
Surface XDim = 0.000000
Surface YDim = 0.000000
Surface ZDim = 0.000000"""
temp = """
Num Layers = 5
Unit Name = UC Base
Height = -32000
Apply Alterations = ON
Density = 3.50e+00
Anisotropic Field = 0
MagSusX = 1.50e-06
MagSusY = 1.60e-03
MagSusZ = 1.60e-03
MagSus Dip = 9.00e+01
MagSus DipDir = 9.00e+01
MagSus Pitch = 0.00e+00
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-03
Color Name = Color 98
Red = 84
Green = 153
Blue = 0
Unit Name = UC Layer 1
Height = 5650
Apply Alterations = ON
Density = 3.50e+00
Anisotropic Field = 0
MagSusX = 1.50e-06
MagSusY = 1.60e-03
MagSusZ = 1.60e-03
MagSus Dip = 9.00e+01
MagSus DipDir = 9.00e+01
MagSus Pitch = 0.00e+00
Remanent Magnetization = 0
Inclination = 30.00
Angle with the Magn. North = 30.00
Strength = 1.60e-03
Color Name = Color 68
Red = 204
Green = 117
Blue = 0
Name = $NAME$"""
# everything below events
footer = """#BlockOptions
Number of Views = 1
Current View = 0
NAME = Default
Origin X = 0.00
Origin Y = 0.00
Origin Z = 5000.00
Length X = 10000.00
Length Y = 7000.00
Length Z = 5000.00
Geology Cube Size = 50.00
Geophysics Cube Size = 50.00
#GeologyOptions
Scale = 10.00
SectionDec = 90.00
WellDepth = 5000.00
WellAngleZ = 0.00
BoreholeX = 0.00
BoreholeX = 0.00
BoreholeX = 5000.00
BoreholeDecl = 90.00
BoreholeDip = 0.00
BoreholeLength = 5000.00
SectionX = 0.00
SectionY = 0.00
SectionZ = 5000.00
SectionDecl = 90.00
SectionLength = 10000.00
SectionHeight = 5000.00
topofile = FALSE
Topo Filename =
Topo Directory = .
Topo Scale = 1.00
Topo Offset = 0.00
Topo First Contour = 100.00
Topo Contour Interval = 100.00
Chair Diagram = FALSE
Chair_X = 5000.00
Chair_Y = 3500.00
Chair_Z = 2500.00
#GeophysicsOptions
GPSRange = 0
Declination = 0.00
Inclination = -67.00
Intensity = 63000.00
Field Type = FIXED
Field xPos = 0.00
Field yPos = 0.00
Field zPos = 5000.00
Inclination Ori = 0.00
Inclination Change = 0.00
Intensity Ori = 90.00
Intensity Change = 0.00
Declination Ori = 0.00
Declination Change = 0.00
Altitude = 80.00
Airborne= FALSE
Calculation Method = SPATIAL
Spectral Padding Type = RECLECTION_PADDING
Spectral Fence = 100
Spectral Percent = 100
Constant Boxing Depth = 0.00
Clever Boxing Ratio = 1.00
Deformable Remanence= FALSE
Deformable Anisotropy= TRUE
Vector Components= FALSE
Project Vectors= TRUE
Pad With Real Geology= FALSE
Draped Survey= FALSE
#3DOptions
Declination = 150.000000
Elevation = 30.000000
Scale = 1.000000
Offset X = 1.000000
Offset Y = 1.000000
Offset Z = 1.000000
Fill Type = 2
#ProjectOptions
Susceptibility Units = CGS
Geophysical Calculation = 2
Calculation Type = LOCAL_JOB
Length Scale = 0
Printing Scale = 1.000000
Image Scale = 10.000000
New Windows = FALSE
Background Red Component = 254
Background Green Component = 254
Background Blue Component = 254
Internet Address = 255.255.255.255
Account Name =
Noddy Path = ./noddy
Help Path = iexplore %h
Movie Frames Per Event = 3
Movie Play Speed = 10.00
Movie Type = 0
Gravity Clipping Type = RELATIVE_CLIPPING
Gravity Image Display Clip Min = 0.000000
Gravity Image Display Clip Max = 100.000000
Gravity Image Display Type = GREY
Gravity Image Display Num Contour = 25
Magnetics Clipping Type = RELATIVE_CLIPPING
Magnetics Image Display Clip Min = 0.000000
Magnetics Image Display Clip Max = 100.000000
Magnetics Image Display Type = GREY
Magnetics Image Display Num Contour = 25
False Easting = 0.000000
False Northing = 0.000000
#Window Positions
Num Windows = 16
Name = Block Diagram
X = 60
Y = 60
Width = 500
Height = 300
Name = Movie
X = 60
Y = 60
Width = -1
Height = -1
Name = Well Log
X = 60
Y = 60
Width = 400
Height = 430
Name = Section
X = 14
Y = 16
Width = 490
Height = -1
Name = Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Topography Map
X = 60
Y = 60
Width = 490
Height = 375
Name = 3D Stratigraphy
X = 60
Y = 60
Width = 490
Height = 375
Name = Line Map
X = 60
Y = 60
Width = 490
Height = -1
Name = Profile - From Image
X = 60
Y = 60
Width = 490
Height = 600
Name = Sterographic Projections
X = 60
Y = 60
Width = 430
Height = 430
Name = Stratigraphic Column
X = 60
Y = 60
Width = 230
Height = 400
Name = Image
X = 30
Y = 30
Width = -1
Height = -1
Name = Contour
X = 30
Y = 30
Width = -1
Height = -1
Name = Toolbar
X = 10
Y = 0
Width = -1
Height = -1
Name = History
X = 229
Y = 160
Width = 762
Height = 898
Name = History
X = 229
Y = 160
Width = 762
Height = 898
#Icon Positions
Num Icons = 3
Row = 1
Column = 1
X Position = 1
Y Position = 1
Row = 1
Column = 2
X Position = 4
Y Position = 1
Row = 1
Column = 3
X Position = 7
Y Position = 1
Floating Menu Rows = 1
Floating Menu Cols = 24
End of Status Report"""
if __name__ == '__main__':
# some testing and debugging:
import os
os.chdir(r'/Users/Florian/git/pynoddy/sandbox')
H1 = NoddyHistory("../examples/simple_two_faults.his")
H1.swap_events(2, 3)
H1.write_history("test")
H2 = NoddyHistory("test")
H2.events[2].properties['Dip'] = 12
H2.write_history("test2")
| gpl-2.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/test_packers.py | 1 | 30648 | import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
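# The helper above is essentially this round trip, with ensure_clean()
# removing the temporary file afterwards (sketch):
#
#   to_msgpack(path, obj, compress=None) # serialize to a temporary file
#   obj_again = read_msgpack(path) # deserialize and return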
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assertTrue(i.equals(i_rec))
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
self.assertRaises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
with warnings.catch_warnings(record=True):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pydata/pandas/pull/9783
"""
def setUp(self):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setUp()
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
self.assertTrue(block.values.flags.writeable)
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(self, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with patch(compress_module, 'decompress', decompress), \
tm.assert_produces_warning(PerformanceWarning) as ws:
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
self.assertTrue(block.values.flags.writeable)
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
self.assertEqual(
str(w.message),
'copying data after decompressing; this may mean that'
' decompress is caching its result',
)
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
self.assertEqual(buf, control_buf)
def test_compression_warns_when_decompress_caches_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_compression_warns_when_decompress_caches('zlib')
def test_compression_warns_when_decompress_caches_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_compression_warns_when_decompress_caches('blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
np.testing.assert_array_equal(empty_unpacked, empty)
self.assertTrue(empty_unpacked.flags.writeable)
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
np.testing.assert_array_equal(char_unpacked, char)
self.assertTrue(char_unpacked.flags.writeable)
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
# we compare the ord of bytes b'a' with unicode u'a' because they should
# always be the same (unless we were able to mutate the shared
# character singleton, in which case ord(b'a') == ord(b'b')).
self.assertEqual(ord(b'a'), ord(u'a'))
np.testing.assert_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
self.assertTrue(1 in self.encode_decode(df1['A'], compress='blosc'))
self.assertTrue(1. in self.encode_decode(df2['A'], compress='blosc'))
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
self.assertTrue(1 in self.encode_decode(df1['A'], compress='zlib'))
self.assertTrue(1. in self.encode_decode(df2['A'], compress='zlib'))
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
raise nose.SkipTest('no blosc')
if not self._SQLALCHEMY_INSTALLED:
raise nose.SkipTest('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
raise nose.SkipTest('no zlib')
if not self._SQLALCHEMY_INSTALLED:
raise nose.SkipTest('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setUp(self):
super(TestEncoding, self).setUp()
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in compat.itervalues(self.frame):
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
self.assertEqual(result, expected)
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
class TestMsgpack():
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
3. Move the created msgpack file to the "data/legacy_msgpack/<version>" directory.
NOTE: TestMsgpack can't be a subclass of tm.TestCase because it uses test generators.
http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
"""
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_msgpack_data, create_data)
self.data = create_msgpack_data()
self.all_data = create_data()
self.path = u('__%s__.msgpack' % tm.rands(10))
self.minimum_structure = {'series': ['float', 'int', 'mixed',
'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
assert kind in data[
typ], '"{0}" not found in data["{1}"]'.format(kind, typ)
def compare(self, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
if LooseVersion(version) < '0.18.0':
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
self.check_min_structure(data)
for typ, dv in data.items():
assert typ in self.all_data, ('unpacked data contains '
'extra key "{0}"'
.format(typ))
for dt, result in dv.items():
assert dt in self.all_data[typ], ('data["{0}"] contains extra '
'key "{1}"'.format(typ, dt))
try:
expected = self.data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comparator = getattr(
self, "compare_{typ}_{dt}".format(typ=typ, dt=dt), None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def read_msgpacks(self, version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
# GH12142 0.17 files packed in P2 can't be read in P3
if (compat.PY3 and
version.startswith('0.17.') and
f.split('.')[-4][-1] == '2'):
continue
vf = os.path.join(pth, f)
try:
self.compare(vf, version)
except ImportError:
# blosc not installed
continue
n += 1
assert n > 0, 'Msgpack files are not tested'
def test_msgpack(self):
msgpack_path = tm.get_data_path('legacy_msgpack')
n = 0
for v in os.listdir(msgpack_path):
pth = os.path.join(msgpack_path, v)
if os.path.isdir(pth):
yield self.read_msgpacks, v
n += 1
assert n > 0, 'Msgpack files are not tested'
| mit |
larsoner/mne-python | mne/source_estimate.py | 2 | 127983 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import contextlib
import copy
import os.path as op
from types import GeneratorType
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix, block_diag as sparse_block_diag
from .baseline import rescale
from .cov import Covariance
from .evoked import _get_peak
from .filter import resample
from .io.constants import FIFF
from .surface import (read_surface, _get_ico_surface, mesh_edges,
_project_onto_surface)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject, SourceSpaces, _get_src_nn,
_import_nibabel, _get_mri_info_data,
_get_atlas_values, _check_volume_labels,
read_freesurfer_lut)
from .transforms import _get_trans, apply_trans
from .utils import (get_subjects_dir, _check_subject, logger, verbose, _pl,
_time_mask, warn, copy_function_doc_to_method_doc,
fill_doc, _check_option, _validate_type, _check_src_normal,
_check_stc_units, _check_pandas_installed,
_check_pandas_index_arguments, _convert_times, _ensure_int,
_build_data_frame, _check_time_format, _check_path_like,
sizeof_fmt, object_size)
from .viz import (plot_source_estimates, plot_vector_source_estimates,
plot_volume_source_estimates)
from .io.base import TimeMixin
from .io.meas_info import Info
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
"""Aux Function."""
with open(filename, 'rb') as fid:
buf = fid.read()
stc = dict()
offset = 0
num_bytes = 4
# read tmin in ms
stc['tmin'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tmin'] /= 1000.0
offset += num_bytes
# read the sampling period in ms
stc['tstep'] = float(np.frombuffer(buf, dtype=">f4", count=1,
offset=offset))
stc['tstep'] /= 1000.0
offset += num_bytes
# read number of vertices/sources
vertices_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
# read the source vector
stc['vertices'] = np.frombuffer(buf, dtype=">u4", count=vertices_n,
offset=offset)
offset += num_bytes * vertices_n
# read the number of timepts
data_n = int(np.frombuffer(buf, dtype=">u4", count=1, offset=offset))
offset += num_bytes
if (vertices_n and # vertices_n can be 0 (empty stc)
((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.frombuffer(buf, dtype=">f4", count=vertices_n * data_n,
offset=offset)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file.
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tobytes())
# write the sampling period in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tobytes())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tobytes())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tobytes())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tobytes())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tobytes())
# close the file
fid.close()
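# Illustrative round trip for the binary STC layout handled by _read_stc and
# _write_stc above: tmin (ms, >f4), tstep (ms, >f4), n_vertices (>u4), the
# vertex indices (>u4), n_times (>u4), then the data frame by frame (>f4).
# A minimal sketch with hypothetical values (data is cast to float32 on
# disk, hence the loose tolerance):
#
#     import os
#     import tempfile
#     import numpy as np
#     vertices = np.array([2, 5, 9], dtype=np.uint32)
#     data = np.random.RandomState(0).randn(3, 4)  # (n_vertices, n_times)
#     fname = os.path.join(tempfile.mkdtemp(), 'example-lh.stc')
#     _write_stc(fname, tmin=0.1, tstep=0.001, vertices=vertices, data=data)
#     stc = _read_stc(fname)
#     np.testing.assert_allclose(stc['data'], data, rtol=1e-5)
#     np.testing.assert_allclose(stc['tmin'], 0.1, rtol=1e-5)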
def _read_3(fid):
"""Read 3 byte integer from file."""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
Returns
-------
data : dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
"""Write 3 byte integer to file."""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tobytes())
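# The 3-byte big-endian integers used by .w files round-trip through the two
# helpers above; a hypothetical on-disk check:
#
#     import os
#     import tempfile
#     fname = os.path.join(tempfile.mkdtemp(), 'three_bytes.bin')
#     with open(fname, 'wb') as fid:
#         _write_3(fid, 70000)          # 70000 == 0x011170
#     with open(fname, 'rb') as fid:
#         assert _read_3(fid) == 70000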
def _write_w(filename, vertices, data):
"""Write a w file.
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
vertices : array of int
Vertex indices (0 based).
data : 1D array
The data array (nvert).
"""
assert (len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tobytes())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tobytes())
# close the file
fid.close()
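# Hypothetical round trip for the single-time-point w format written above
# (the file name here is purely illustrative):
#
#     import numpy as np
#     vertices = np.array([3, 8, 21], dtype=np.int32)
#     data = np.array([0.5, -1.0, 2.0], dtype=np.float32)
#     _write_w('example-lh.w', vertices=vertices, data=data)
#     w = _read_w('example-lh.w')
#     np.testing.assert_allclose(w['data'], data)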
def read_source_estimate(fname, subject=None):
"""Read a source estimate object.
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate
The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
single file named '*-vl.stc' or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemispheres with names
following this pattern.
- for vector surface source estimates, only HDF5 files are supported.
- for mixed source estimates, only HDF5 files are supported.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
""" # noqa: E501
fname_arg = fname
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
# make sure corresponding file(s) can be found
ftype = None
if op.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
elif fname.endswith('.h5'):
ftype = 'h5'
fname = fname[:-3]
else:
raise RuntimeError('Unknown extension for file %s' % fname_arg)
if ftype != 'volume':
stc_exist = [op.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [op.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
if all(stc_exist) and ftype != 'w':
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif op.exists(fname + '.h5'):
ftype = 'h5'
elif op.exists(fname + '-stc.h5'):
ftype = 'h5'
fname += '-stc'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
kwargs['vertices'] = [kwargs['vertices']]
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
ftype = 'surface'
elif ftype == 'h5':
kwargs = read_hdf5(fname + '.h5', title='mnepython')
ftype = kwargs.pop('src_type', 'surface')
if isinstance(kwargs['vertices'], np.ndarray):
kwargs['vertices'] = [kwargs['vertices']]
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
if 'subject' not in kwargs:
kwargs['subject'] = subject
if subject is not None and subject != kwargs['subject']:
raise RuntimeError('provided subject name "%s" does not match '
'subject name from the file "%s"'
% (subject, kwargs['subject']))
if ftype in ('volume', 'discrete'):
klass = VolVectorSourceEstimate
elif ftype == 'mixed':
klass = MixedVectorSourceEstimate
else:
assert ftype == 'surface'
klass = VectorSourceEstimate
if kwargs['data'].ndim < 3:
klass = klass._scalar_class
return klass(**kwargs)
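# Minimal usage sketch (hypothetical file stems): for surface estimates the
# stem alone suffices and the '-lh.stc' / '-rh.stc' pair is located
# automatically; HDF5 files may be given with their full name:
#
#     stc = read_source_estimate('sample_audvis-meg')
#     stc_h5 = read_source_estimate('sample_audvis-stc.h5', subject='sample')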
def _get_src_type(src, vertices, warn_text=None):
src_type = None
if src is None:
if warn_text is None:
warn("src should not be None for a robust guess of stc type.")
else:
warn(warn_text)
if isinstance(vertices, list) and len(vertices) == 2:
src_type = 'surface'
elif isinstance(vertices, np.ndarray) or (isinstance(vertices, list)
                                          and len(vertices) == 1):
src_type = 'volume'
elif isinstance(vertices, list) and len(vertices) > 2:
src_type = 'mixed'
else:
src_type = src.kind
assert src_type in ('surface', 'volume', 'mixed', 'discrete')
return src_type
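# The guessing rules above, illustrated on toy inputs (src=None triggers the
# robustness warning):
#
#     import numpy as np
#     _get_src_type(None, [np.arange(3), np.arange(4)])   # -> 'surface'
#     _get_src_type(None, np.arange(5))                   # -> 'volume'
#     _get_src_type(None, [np.arange(2)] * 3)             # -> 'mixed'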
def _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,
subject=None, vector=False, source_nn=None, warn_text=None):
"""Generate a surface, vector-surface, volume or mixed source estimate."""
def guess_src_type():
return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)
src_type = guess_src_type() if src_type is None else src_type
if vector and src_type == 'surface' and source_nn is None:
raise RuntimeError('No source vectors supplied.')
# infer Klass from src_type
if src_type == 'surface':
Klass = VectorSourceEstimate if vector else SourceEstimate
elif src_type in ('volume', 'discrete'):
Klass = VolVectorSourceEstimate if vector else VolSourceEstimate
elif src_type == 'mixed':
Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate
else:
raise ValueError('vertices has to be either a list with one or more '
'arrays or an array')
# Rotate back for vector source estimates
if vector:
n_vertices = sum(len(v) for v in vertices)
assert data.shape[0] in (n_vertices, n_vertices * 3)
if len(data) == n_vertices:
assert src_type == 'surface' # should only be possible for this
assert source_nn.shape == (n_vertices, 3)
data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]
else:
data = data.reshape((-1, 3, data.shape[-1]))
assert source_nn.shape in ((n_vertices, 3, 3),
(n_vertices * 3, 3))
# This will be an identity transform for volumes, but let's keep
# the code simple and general and just do the matrix mult
data = np.matmul(
np.transpose(source_nn.reshape(n_vertices, 3, 3),
axes=[0, 2, 1]), data)
return Klass(
data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject
)
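# Shape sketch for the vector branch of _make_stc (toy sizes):
# free-orientation data arrives as (n_vertices * 3, n_times) and is rotated
# back to XYZ per vertex by the transposed (3, 3) blocks of source_nn. With
# identity normals the rotation is a no-op:
#
#     import numpy as np
#     n_vertices, n_times = 10, 5
#     source_nn = np.tile(np.eye(3), (n_vertices, 1))      # (30, 3)
#     data = np.random.RandomState(0).randn(n_vertices * 3, n_times)
#     rot = np.transpose(source_nn.reshape(n_vertices, 3, 3), axes=[0, 2, 1])
#     out = np.matmul(rot, data.reshape(n_vertices, 3, n_times))
#     np.testing.assert_allclose(out, data.reshape(n_vertices, 3, n_times))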
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations."""
compat = False
if type(a) != type(b):
raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine source estimates that do not have '
'the same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(TimeMixin):
_data_ndim = 2
@verbose
def __init__(self, data, vertices, tmin, tstep,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_data_ndim'), self.__class__.__name__
assert hasattr(self, '_src_type'), self.__class__.__name__
assert hasattr(self, '_src_count'), self.__class__.__name__
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel (%s) and sens_data (%s) have invalid '
'dimensions'
% (kernel.shape, sens_data.shape))
if sens_data.ndim != 2:
raise ValueError('The sensor data must have 2 dimensions, got '
'%s' % (sens_data.ndim,))
_validate_type(vertices, list, 'vertices')
if self._src_count is not None:
if len(vertices) != self._src_count:
raise ValueError('vertices must be a list with %d entries, '
'got %s' % (self._src_count, len(vertices)))
vertices = [np.array(v, np.int64) for v in vertices] # makes copy
if any(np.any(np.diff(v) <= 0) for v in vertices):
raise ValueError('Vertices must be ordered in increasing order.')
n_src = sum([len(v) for v in vertices])
# safeguard the user against doing something silly
if data is not None:
if data.ndim not in (self._data_ndim, self._data_ndim - 1):
raise ValueError('Data (shape %s) must have %s dimensions for '
'%s' % (data.shape, self._data_ndim,
self.__class__.__name__))
if data.shape[0] != n_src:
raise ValueError(
f'Number of vertices ({n_src}) and stc.data.shape[0] '
f'({data.shape[0]}) must match')
if self._data_ndim == 3:
if data.shape[1] != 3:
raise ValueError(
'Data for VectorSourceEstimate must have '
'shape[1] == 3, got shape %s' % (data.shape,))
if data.ndim == self._data_ndim - 1: # allow upbroadcasting
data = data[..., np.newaxis]
self._data = data
self._tmin = tmin
self._tstep = tstep
self.vertices = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self._times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
def __repr__(self): # noqa: D105
s = "%d vertices" % (sum(len(v) for v in self.vertices),)
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data shape : %s" % (self.shape,)
sz = sum(object_size(x) for x in (self.vertices + [self.data]))
s += f", ~{sizeof_fmt(sz)}"
return "<%s | %s>" % (type(self).__name__, s)
@fill_doc
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
stc = self.magnitude() if self._data_ndim == 3 else self
if self._n_vertices == 0:
raise RuntimeError('Cannot find peaks with no vertices')
vert_idx, time_idx, _ = _get_peak(
stc.data, self.times, tmin, tmax, mode)
if not vert_as_index:
vert_idx = np.concatenate(self.vertices)[vert_idx]
if not time_as_index:
time_idx = self.times[time_idx]
return vert_idx, time_idx
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, verbose=verbose)
@verbose
def apply_baseline(self, baseline=(None, 0), *, verbose=None):
"""Baseline correct source estimate data.
Parameters
----------
%(baseline_stc)s
Defaults to ``(None, 0)``, i.e. beginning of the data until
time point zero.
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The baseline-corrected source estimate object.
Notes
-----
Baseline correction can be done multiple times.
"""
self.data = rescale(self.data, self.times, baseline, copy=False)
return self
@verbose
def save(self, fname, ftype='h5', verbose=None):
"""Save the full source estimate to an HDF5 file.
Parameters
----------
fname : str
The file name to write the source estimate to, should end in
'-stc.h5'.
ftype : str
File format to use. Currently, the only allowed value is "h5".
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
if ftype != 'h5':
raise ValueError('%s objects can only be written as HDF5 files.'
% (self.__class__.__name__,))
if not fname.endswith('.h5'):
fname += '-stc.h5'
write_hdf5(fname,
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep, subject=self.subject,
src_type=self._src_type),
title='mnepython', overwrite=True)
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=1.0, time_viewer='auto',
subjects_dir=None,
figure=None, views='auto', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None):
brain = plot_source_estimates(
self, subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit, backend=backend,
spacing=spacing, title=title, show_traces=show_traces,
src=src, volume_options=volume_options, view_layout=view_layout,
add_data_kwargs=add_data_kwargs, verbose=verbose)
return brain
@property
def sfreq(self):
"""Sample rate of the data."""
return 1. / self.tstep
@property
def _n_vertices(self):
return sum(len(v) for v in self.vertices)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data."""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
@fill_doc
def crop(self, tmin=None, tmax=None, include_tmax=True):
"""Restrict SourceEstimate to a time interval.
Parameters
----------
tmin : float | None
The first time point in seconds. If None the first present is used.
tmax : float | None
The last time point in seconds. If None the last present is used.
%(include_tmax)s
Returns
-------
stc : instance of SourceEstimate
The cropped source estimate.
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,
include_tmax=include_tmax)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[..., mask]
else:
self.data = self.data[..., mask]
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample data.
If appropriate, an anti-aliasing filter is applied before resampling.
See :ref:`resampling-and-decimating` for more information.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : str | tuple
Window to use in resampling. See :func:`scipy.signal.resample`.
%(n_jobs)s
%(verbose_meth)s
Returns
-------
stc : instance of SourceEstimate
The resampled source estimate.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
data = self.data
if data.dtype == np.float32:
data = data.astype(np.float64)
self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
return self
@property
def data(self):
"""Numpy array of source estimate data."""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@data.setter
def data(self, value):
value = np.asarray(value)
if self._data is not None and value.ndim != self._data.ndim:
raise ValueError('Data array should have %d dimensions.' %
self._data.ndim)
n_verts = sum(len(v) for v in self.vertices)
if value.shape[0] != n_verts:
raise ValueError('The first dimension of the data array must '
'match the number of vertices (%d != %d)' %
(value.shape[0], n_verts))
self._data = value
self._update_times()
@property
def shape(self):
"""Shape of the data."""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
@property
def tmin(self):
"""The first timestamp."""
return self._tmin
@tmin.setter
def tmin(self, value):
self._tmin = float(value)
self._update_times()
@property
def tstep(self):
"""The change in time between two consecutive samples (1 / sfreq)."""
return self._tstep
@tstep.setter
def tstep(self, value):
if value <= 0:
raise ValueError('.tstep must be greater than 0.')
self._tstep = float(value)
self._update_times()
@property
def times(self):
"""A timestamp for each sample."""
return self._times
@times.setter
def times(self, value):
raise ValueError('You cannot write to the .times attribute directly. '
'This property automatically updates whenever '
'.tmin, .tstep or .data changes.')
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep."""
self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))
self._times.flags.writeable = False
def __add__(self, a):
"""Add source estimates."""
stc = self.copy()
stc += a
return stc
def __iadd__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data += a.data
else:
self.data += a
return self
def mean(self):
"""Make a summary stc file with mean over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
out = self.sum()
out /= len(self.times)
return out
def sum(self):
"""Make a summary stc file with sum over time points.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc.
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[-1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),
vertices=self.vertices, tmin=tmin,
tstep=tstep, subject=self.subject)
return sum_stc
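# The summary-time bookkeeping above, with toy numbers: 5 samples at
# tstep=0.1 starting at tmin=0 collapse to a single sample centered on the
# original window:
#
#     import numpy as np
#     stc = SourceEstimate(np.ones((2, 5)), [np.array([0]), np.array([1])],
#                          tmin=0., tstep=0.1)
#     s = stc.sum()
#     # s.shape == (2, 1); s.tmin == 0.25; s.tstep == 0.5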
def __sub__(self, a):
"""Subtract source estimates."""
stc = self.copy()
stc -= a
return stc
def __isub__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data -= a.data
else:
self.data -= a
return self
def __truediv__(self, a): # noqa: D105
return self.__div__(a)
def __div__(self, a): # noqa: D105
"""Divide source estimates."""
stc = self.copy()
stc /= a
return stc
def __itruediv__(self, a): # noqa: D105
return self.__idiv__(a)
def __idiv__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data /= a.data
else:
self.data /= a
return self
def __mul__(self, a):
"""Multiply source estimates."""
stc = self.copy()
stc *= a
return stc
def __imul__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self.data *= a.data
else:
self.data *= a
return self
def __pow__(self, a): # noqa: D105
stc = self.copy()
stc **= a
return stc
def __ipow__(self, a): # noqa: D105
self._remove_kernel_sens_data_()
self.data **= a
return self
def __radd__(self, a): # noqa: D105
return self + a
def __rsub__(self, a): # noqa: D105
return self - a
def __rmul__(self, a): # noqa: D105
return self * a
def __rdiv__(self, a): # noqa: D105
return self / a
def __neg__(self): # noqa: D105
"""Negate the source estimate."""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc.data *= -1
return stc
def __pos__(self): # noqa: D105
return self
def __abs__(self):
"""Compute the absolute value of the data.
Returns
-------
stc : instance of _BaseSourceEstimate
A version of the source estimate, where the data attribute is set
to abs(self.data).
"""
stc = self.copy()
stc._remove_kernel_sens_data_()
stc._data = abs(stc._data)
return stc
def sqrt(self):
"""Take the square root.
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of source estimate instance.
Returns
-------
stc : instance of SourceEstimate
A copy of the source estimate.
"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Return a source estimate object with data summarized over time bins.
The data are summarized in time bins of ``width`` seconds. This method
is intended for visualization only. No filter is applied to the data
before binning, making the method inappropriate as a tool for
downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The binned source estimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nt = len(times) - 1
data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[..., i] = func(self.data[..., idx], axis=-1)
tmin = times[0] + width / 2.
stc = self.copy()
stc._data = data
stc.tmin = tmin
stc.tstep = width
return stc
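# Illustrative call (assumes an existing stc): averaging 10 ms bins of a
# 1 kHz estimate yields one sample per bin, centered on the bin:
#
#     binned = stc.bin(width=0.01, func=np.mean)
#     # binned.tstep == 0.01; binned.tmin == stc.tmin + 0.005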
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warn('Performance can be improved by not accessing the data '
'attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform.
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
:func:`functools.partial`). The first parameter of the function is
the input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. See Notes for details.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : SourceEstimate | VectorSourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "apply_lcmv_epochs"
do this automatically (if possible).
"""
# min and max data indices to include
times = 1000. * self.times
t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
# +1, because upper boundary needs to include the last sample
tmax_idx = t_idx[-1] + 1
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmin = self.times[tmin_idx]
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs.vertices = verts
stcs.data = data_t
stcs.tmin = tmin
return stcs
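# Sketch (assumes a surface stc): parameters are bound with
# functools.partial as the docstring suggests, here rectifying the data in
# place:
#
#     from functools import partial
#     import numpy as np
#     stc.transform(partial(np.clip, a_min=0., a_max=None), copy=False)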
@fill_doc
def to_data_frame(self, index=None, scalings=None,
long_format=False, time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Vertices are converted to columns in the DataFrame. By default,
an additional column "time" is added, unless ``index='time'``
(in which case time values form the DataFrame's index).
Parameters
----------
%(df_index_evk)s
Defaults to ``None``.
%(df_scalings)s
%(df_longform_stc)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'subject']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
data = self.data.T
times = self.times
# prepare extra columns / multiindex
mindex = list()
default_index = ['time']
if self.subject is not None:
default_index = ['subject', 'time']
mindex.append(('subject', np.repeat(self.subject, data.shape[0])))
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
# triage surface vs volume source estimates
col_names = list()
kinds = ['VOL'] * len(self.vertices)
if isinstance(self, (_BaseSurfaceSourceEstimate,
_BaseMixedSourceEstimate)):
kinds[:2] = ['LH', 'RH']
for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):
col_names.extend(['{}_{}'.format(kind, vert) for vert in vertno])
# build DataFrame
df = _build_data_frame(self, data, None, long_format, mindex, index,
default_index=default_index,
col_names=col_names, col_kind='source')
return df
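# Illustrative call (assumes an existing scalar stc): with index='time' the
# frame has one row per sample and one column per source, named like
# 'LH_1234', 'RH_5678', or 'VOL_42' depending on the source space kind:
#
#     df = stc.to_data_frame(index='time')
#     # df.shape == (len(stc.times), stc.data.shape[0])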
def _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,
restrict_vertices):
"""Find the center of mass on a surface."""
if (values == 0).all() or (values < 0).any():
raise ValueError('All values must be non-negative and at least one '
'must be non-zero, cannot compute COM')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
surf = read_surface(op.join(subjects_dir, subject, 'surf',
hemi + '.' + surf))
if restrict_vertices is True:
restrict_vertices = vertices
elif restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif isinstance(restrict_vertices, SourceSpaces):
idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0
restrict_vertices = restrict_vertices[idx]['vertno']
else:
restrict_vertices = np.array(restrict_vertices, int)
pos = surf[0][vertices, :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
return vertex
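# The weighted center above, on toy numbers: positions are (3, n_vertices),
# weights are the per-vertex values, and the result is then snapped to the
# nearest allowed vertex:
#
#     import numpy as np
#     pos = np.array([[0., 0., 0.], [2., 0., 0.]]).T     # (3, 2)
#     values = np.array([1., 3.])
#     com = np.sum(pos * values, axis=1) / np.sum(values)  # [1.5, 0., 0.]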
@fill_doc
class _BaseSurfaceSourceEstimate(_BaseSourceEstimate):
"""Abstract base class for surface source estimates.
Parameters
----------
data : array
The data in source space.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
data : array
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
_src_type = 'surface'
_src_count = 2
@property
def lh_data(self):
"""Left hemisphere data."""
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
"""Right hemisphere data."""
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
"""Left hemisphere vertno."""
return self.vertices[0]
@property
def rh_vertno(self):
"""Right hemisphere vertno."""
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Get a source estimate object restricted to a label.
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The source estimate restricted to the given label.
"""
# make sure label and stc are compatible
from .label import Label, BiHemiLabel
_validate_type(label, (Label, BiHemiLabel), 'label')
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
else:
assert label.hemi == 'rh'
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices.
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError('vertices must be a list')
if not len(self.vertices) == len(vertices):
raise ValueError('vertices must have the same length as '
'stc.vertices')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds),) + self.data.shape[1:])
self.data = np.insert(self.data, inds, new_data, axis=0)
return self
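# Illustrative call (assumes a surface stc): adding two previously absent
# right-hemisphere vertices (7 and 11 are hypothetical IDs) inserts
# zero-filled rows while keeping each vertex array sorted:
#
#     import numpy as np
#     stc.expand([stc.lh_vertno, np.union1d(stc.rh_vertno, [7, 11])])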
@verbose
def to_original_src(self, src_orig, subject_orig=None,
subjects_dir=None, verbose=None):
"""Get a source estimate from morphed source to the original subject.
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
%(subjects_dir)s
%(verbose_meth)s
Returns
-------
stc : SourceEstimate | VectorSourceEstimate
The transformed source estimate.
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError('stc.subject must be set')
src_orig = _ensure_src(src_orig, kind='surface')
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
return self.__class__(self._data[data_idx], vertices,
self.tmin, self.tstep, subject_orig)
@fill_doc
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude.
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
%(get_peak_parameters)s
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
_check_option('hemi', hemi, ('lh', 'rh', None))
vertex_offset = 0
if hemi is not None:
if hemi == 'lh':
data = self.lh_data
vertices = [self.lh_vertno, []]
else:
vertex_offset = len(self.vertices[0])
data = self.rh_data
vertices = [[], self.rh_vertno]
meth = self.__class__(
data, vertices, self.tmin, self.tstep).get_peak
else:
meth = super().get_peak
out = meth(tmin=tmin, tmax=tmax, mode=mode,
vert_as_index=vert_as_index,
time_as_index=time_as_index)
if vertex_offset and vert_as_index:
out = (out[0] + vertex_offset, out[1])
return out
@fill_doc
class SourceEstimate(_BaseSurfaceSourceEstimate):
"""Container for surface source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. When it is a single array, the
left hemisphere is stored in data[:len(vertices[0])] and the right
hemisphere is stored in data[-len(vertices[1]):].
When data is a tuple, it contains two arrays:
- "kernel" shape (n_vertices, n_sensors) and
- "sens_data" shape (n_sensors, n_times).
In this case, the source space data corresponds to
``np.dot(kernel, sens_data)``.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array, shape (2,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
if np.iscomplexobj(self.data):
raise ValueError("Cannot save complex-valued STC data in "
"FIFF format; please set ftype='h5' to save "
"in HDF5 format instead, or cast the data to "
"real numbers before saving.")
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
super().save(fname)
logger.info('[done]')
@verbose
def estimate_snr(self, info, fwd, cov, verbose=None):
r"""Compute time-varying SNR in the source space.
This function should only be used with source estimates whose units are
nanoamperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).
.. warning:: This function currently only works properly for fixed
orientation.
Parameters
----------
info : instance of Info
The measurement info.
fwd : instance of Forward
The forward solution used to create the source estimate.
cov : instance of Covariance
The noise covariance used to estimate the resting cortical
activations. Should be an evoked covariance, not empty room.
%(verbose)s
Returns
-------
snr_stc : instance of SourceEstimate
The source estimate with the SNR computed.
Notes
-----
We define the SNR in decibels for each source location at each
time point as:
.. math::
{\rm SNR} = 10\log_{10}\left[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}\right]
where :math:`b_k` is the signal on sensor :math:`k` provided by the
forward model for a source with unit amplitude, :math:`a` is the
source amplitude, :math:`N` is the number of sensors, and
:math:`s_k^2` is the noise variance on sensor :math:`k`.
References
----------
.. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,
D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).
Mapping the Signal-To-Noise-Ratios of Cortical Sources in
Magnetoencephalography and Electroencephalography.
Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571
"""
from .forward import convert_forward_solution, Forward
from .minimum_norm.inverse import _prepare_forward
_validate_type(fwd, Forward, 'fwd')
_validate_type(info, Info, 'info')
_validate_type(cov, Covariance, 'cov')
_check_stc_units(self)
if (self.data >= 0).all():
warn('This STC appears to be from free orientation, currently SNR'
' function is valid only for fixed orientation')
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
# G is gain matrix [ch x src], cov is noise covariance [ch x ch]
G, _, _, _, _, _, _, cov, _ = _prepare_forward(
fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,
use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',
allow_fixed_depth=False, limit=None)
G = G['sol']['data']
n_channels = cov['dim'] # number of sensors/channels
b_k2 = (G * G).T
s_k2 = np.diag(cov['data'])
scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)
snr_stc = self.copy()
snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)
return snr_stc
@fill_doc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None, surf='sphere'):
"""Compute the center of mass of activity.
This function computes the spatial center of mass on the surface
as well as the temporal center of mass as in [1]_.
.. note:: All activity must occur in a single hemisphere, otherwise
an error is raised. The "mass" of each point in space for
computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in
computing the temporal center of mass. This is useful for
quantifying spatio-temporal cluster locations, especially
when combined with :func:`mne.vertex_to_mni`.
Parameters
----------
subject : str | None
The subject the stc is defined for.
hemi : int | None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool | array of int | instance of SourceSpaces
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
will come from that array. If instance of SourceSpaces (as of
0.13), the returned vertex will be from the given source space.
For most accurate estimates, do not restrict vertices.
%(subjects_dir)s
surf : str
The surface to use for Euclidean distance center of mass
finding. The default here is "sphere", which finds the center
of mass on the spherical surface to help avoid potential issues
with cortical folding.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
See Also
--------
mne.Label.center_of_mass
mne.vertex_to_mni
References
----------
.. [1] Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
if not isinstance(surf, str):
raise TypeError('surf must be a string, got %s' % (type(surf),))
subject = _check_subject(self.subject, subject)
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
_check_option('hemi', hemi, [0, 1])
vertices = self.vertices[hemi]
values = values[vert_inds[hemi]] # left or right
del vert_inds
vertex = _center_of_mass(
vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,
subject=subject, subjects_dir=subjects_dir,
restrict_vertices=restrict_vertices)
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
class _BaseVectorSourceEstimate(_BaseSourceEstimate):
_data_ndim = 3
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
assert hasattr(self, '_scalar_class')
super().__init__(data, vertices, tmin, tstep, subject, verbose)
def magnitude(self):
"""Compute magnitude of activity without directionality.
Returns
-------
stc : instance of SourceEstimate
The source estimate without directionality information.
"""
data_mag = np.linalg.norm(self.data, axis=1)
return self._scalar_class(
data_mag, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
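# Norm sketch for magnitude above: vector data is (n_sources, 3, n_times)
# and the per-source amplitude is the Euclidean norm over the orientation
# axis:
#
#     import numpy as np
#     vec = np.zeros((1, 3, 2))
#     vec[0, 0, 0], vec[0, 1, 0] = 3., 4.
#     assert np.linalg.norm(vec, axis=1)[0, 0] == 5.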
def _get_src_normals(self, src, use_cps):
normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in
zip(src, self.vertices)])
return normals
@fill_doc
def project(self, directions, src=None, use_cps=True):
"""Project the data for each vertex in a given direction.
Parameters
----------
directions : ndarray, shape (n_vertices, 3) | str
Can be:
- ``'normal'``
Project onto the source space normals.
- ``'pca'``
SVD will be used to project onto the direction of maximal
power for each source.
- :class:`~numpy.ndarray`, shape (n_vertices, 3)
Projection directions for each source.
src : instance of SourceSpaces | None
The source spaces corresponding to the source estimate.
Not used when ``directions`` is an array, optional when
``directions='pca'``.
%(use_cps)s
Should be the same value that was used when the forward model
was computed (typically True).
Returns
-------
stc : instance of SourceEstimate
The projected source estimate.
directions : ndarray, shape (n_vertices, 3)
The directions that were computed (or just used).
Notes
-----
When using SVD, there is a sign ambiguity for the direction of maximal
power. When ``src is None``, the direction is chosen that makes the
resulting time waveform sum positive (i.e., have positive amplitudes).
When ``src`` is provided, the directions are flipped in the direction
of the source normals, i.e., outward from cortex for surface source
spaces and in the +Z / superior direction for volume source spaces.
.. versionadded:: 0.21
"""
_validate_type(directions, (str, np.ndarray), 'directions')
_validate_type(src, (None, SourceSpaces), 'src')
if isinstance(directions, str):
_check_option('directions', directions, ('normal', 'pca'),
extra='when str')
if directions == 'normal':
if src is None:
raise ValueError(
'If directions="normal", src cannot be None')
_check_src_normal('normal', src)
directions = self._get_src_normals(src, use_cps)
else:
assert directions == 'pca'
x = self.data
if not np.isrealobj(self.data):
_check_option('stc.data.dtype', self.data.dtype,
(np.complex64, np.complex128))
dtype = \
np.float32 if x.dtype == np.complex64 else np.float64
x = x.view(dtype)
assert x.shape[-1] == 2 * self.data.shape[-1]
u, _, v = np.linalg.svd(x, full_matrices=False)
directions = u[:, :, 0]
# The sign is arbitrary, so let's flip it in the direction that
# makes the resulting time series the most positive:
if src is None:
signs = np.sum(v[:, 0].real, axis=1, keepdims=True)
else:
normals = self._get_src_normals(src, use_cps)
signs = np.sum(directions * normals, axis=1, keepdims=True)
assert signs.shape == (self.data.shape[0], 1)
signs = np.sign(signs)
signs[signs == 0] = 1.
directions *= signs
_check_option(
'directions.shape', directions.shape, [(self.data.shape[0], 3)])
data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]
stc = self._scalar_class(
data_norm, self.vertices, self.tmin, self.tstep, self.subject,
self.verbose)
return stc, directions
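# Standalone sketch of the 'pca' branch above (toy data): the dominant
# direction per source is the first left-singular vector of its
# (3, n_times) block, sign-flipped so the projected waveform sums positive:
#
#     import numpy as np
#     x = np.random.RandomState(0).randn(4, 3, 100)
#     u, _, v = np.linalg.svd(x, full_matrices=False)
#     directions = u[:, :, 0]
#     signs = np.sign(np.sum(v[:, 0], axis=1, keepdims=True))
#     signs[signs == 0] = 1.
#     directions *= signs
#     assert directions.shape == (4, 3)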
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='lateral',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None, volume_options=1.,
view_layout='vertical', add_data_kwargs=None,
verbose=None): # noqa: D102
return plot_vector_source_estimates(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
class _BaseVolSourceEstimate(_BaseSourceEstimate):
_src_type = 'volume'
_src_count = None
@copy_function_doc_to_method_doc(plot_source_estimates)
def plot_3d(self, subject=None, surface='white', hemi='both',
colormap='auto', time_label='auto', smoothing_steps=10,
transparent=True, alpha=0.1, time_viewer='auto',
subjects_dir=None,
figure=None, views='axial', colorbar=True, clim='auto',
cortex="classic", size=800, background="black",
foreground=None, initial_time=None, time_unit='s',
backend='auto', spacing='oct6', title=None, show_traces='auto',
src=None, volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None):
return super().plot(
subject=subject, surface=surface, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha, time_viewer=time_viewer,
subjects_dir=subjects_dir,
figure=figure, views=views, colorbar=colorbar, clim=clim,
cortex=cortex, size=size, background=background,
foreground=foreground, initial_time=initial_time,
time_unit=time_unit, backend=backend, spacing=spacing, title=title,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_volume_source_estimates)
def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',
bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto',
transparent='auto', show=True, initial_time=None,
initial_pos=None, verbose=None):
data = self.magnitude() if self._data_ndim == 3 else self
return plot_volume_source_estimates(
data, src=src, subject=subject, subjects_dir=subjects_dir,
mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,
clim=clim, transparent=transparent, show=show,
initial_time=initial_time, initial_pos=initial_pos,
verbose=verbose)
# Override here to provide the volume-specific options
@verbose
def extract_label_time_course(self, labels, src, mode='auto',
allow_empty=False, *, trans=None,
mri_resolution=True, verbose=None):
"""Extract label time courses for lists of labels.
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Parameters
----------
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
%(trans_deprecated)s
%(eltc_mri_resolution)s
%(verbose_meth)s
Returns
-------
%(eltc_returns)s
See Also
--------
extract_label_time_course : Extract time courses for multiple STCs.
Notes
-----
%(eltc_mode_notes)s
"""
return extract_label_time_course(
self, labels, src, mode=mode, return_generator=False,
allow_empty=allow_empty, trans=trans,
mri_resolution=mri_resolution, verbose=verbose)
@fill_doc
def in_label(self, label, mri, src, trans=None):
"""Get a source estimate object restricted to a label.
The returned source estimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : str | int
The label to use. Can be the name of a label if using a standard
FreeSurfer atlas, or an integer value to extract from the ``mri``.
mri : str
Path to the atlas to use.
src : instance of SourceSpaces
The volumetric source space. It must be a single, whole-brain
volume.
%(trans_deprecated)s
Returns
-------
stc : VolSourceEstimate | VolVectorSourceEstimate
The source estimate restricted to the given label.
Notes
-----
.. versionadded:: 0.21.0
"""
if len(self.vertices) != 1:
raise RuntimeError('This method can only be used with whole-brain '
'volume source spaces')
_validate_type(label, (str, 'int-like'), 'label')
if isinstance(label, str):
volume_label = [label]
else:
volume_label = {'Volume ID %s' % (label): _ensure_int(label)}
_dep_trans(trans)
label = _volume_labels(src, (mri, volume_label), mri_resolution=False)
assert len(label) == 1
label = label[0]
keep = np.in1d(self.vertices[0], label.vertices)
values, vertices = self.data[keep], [self.vertices[0][keep]]
label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,
tstep=self.tstep, subject=self.subject)
return label_stc
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Save a volume source estimate in a NIfTI file.
Parameters
----------
fname : str
The name of the generated nifti file.
src : list
The list of source spaces (should all be of type volume).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
If True the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
.. versionadded:: 0.17
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
import nibabel as nib
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,
format=format)
nib.save(img, fname)
def as_volume(self, src, dest='mri', mri_resolution=False,
format='nifti1'):
"""Export volume source estimate as a nifti object.
Parameters
----------
src : instance of SourceSpaces
The source spaces (should all be of type volume, or part of a
mixed source space).
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
mri_resolution : bool
If True the image is saved in MRI resolution.
.. warning:: If you have many time points, the file produced can be
huge.
format : str
Either 'nifti1' (default) or 'nifti2'.
Returns
-------
img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
from .morph import _interpolate_data
data = self.magnitude() if self._data_ndim == 3 else self
return _interpolate_data(data, src, mri_resolution=mri_resolution,
mri_space=True, output=format)
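# Illustrative usage sketch (comment only, not executed): exporting a volume
# source estimate to NIfTI. ``stc`` (a VolSourceEstimate) and ``src`` (the
# matching volume source space) are assumed to come from an inverse solution:
#
#     img = stc.as_volume(src, mri_resolution=True)   # nibabel image in memory
#     stc.save_as_volume('stc-vol.nii.gz', src)       # write the same image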
@fill_doc
class VolSourceEstimate(_BaseVolSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file.
Parameters
----------
fname : str
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : str
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
%(verbose_meth)s
"""
_validate_type(fname, 'path-like', 'fname')
fname = str(fname)
_check_option('ftype', ftype, ['stc', 'w', 'h5'])
if ftype != 'h5' and len(self.vertices) != 1:
raise ValueError('Can only write to .stc or .w if a single volume '
'source space was used, use .h5 instead')
if ftype != 'h5' and self.data.dtype == 'complex':
raise ValueError('Can only write non-complex data to .stc or .w'
', use .h5 instead')
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertices[0], data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertices[0], data=self.data)
elif ftype == 'h5':
super().save(fname, 'h5')
logger.info('[done]')
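# Illustrative sketch of the filename handling above: ``stc.save('mydata')``
# writes ``mydata-vl.stc`` (the suffix is appended automatically), while
# complex-valued data or multi-volume estimates must use HDF5 instead:
#
#     stc.save('mydata')               # -> mydata-vl.stc
#     stc.save('mydata', ftype='h5')   # delegates to the base-class writer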
@fill_doc
class VolVectorSourceEstimate(_BaseVolSourceEstimate,
_BaseVectorSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, 3, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
_scalar_class = VolSourceEstimate
# defaults differ: hemi='both', views='axial'
@copy_function_doc_to_method_doc(plot_vector_source_estimates)
def plot_3d(self, subject=None, hemi='both', colormap='hot',
time_label='auto',
smoothing_steps=10, transparent=True, brain_alpha=0.4,
overlay_alpha=None, vector_alpha=1.0, scale_factor=None,
time_viewer='auto', subjects_dir=None, figure=None,
views='axial',
colorbar=True, clim='auto', cortex='classic', size=800,
background='black', foreground=None, initial_time=None,
time_unit='s', show_traces='auto', src=None,
volume_options=1., view_layout='vertical',
add_data_kwargs=None, verbose=None): # noqa: D102
return _BaseVectorSourceEstimate.plot(
self, subject=subject, hemi=hemi, colormap=colormap,
time_label=time_label, smoothing_steps=smoothing_steps,
transparent=transparent, brain_alpha=brain_alpha,
overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,
scale_factor=scale_factor, time_viewer=time_viewer,
subjects_dir=subjects_dir, figure=figure, views=views,
colorbar=colorbar, clim=clim, cortex=cortex, size=size,
background=background, foreground=foreground,
initial_time=initial_time, time_unit=time_unit,
show_traces=show_traces, src=src, volume_options=volume_options,
view_layout=view_layout, add_data_kwargs=add_data_kwargs,
verbose=verbose)
@fill_doc
class VectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseSurfaceSourceEstimate):
"""Container for vector surface source estimates.
For each vertex, the magnitude of the current is defined in the X, Y and Z
directions.
Parameters
----------
data : array of shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (2,)
Vertex numbers corresponding to the data. The first element of the list
contains vertices of left hemisphere and the second element contains
vertices of right hemisphere.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VolSourceEstimate : A container for volume source estimates.
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.15
"""
_scalar_class = SourceEstimate
###############################################################################
# Mixed source estimate (two cortical surfs plus other stuff)
class _BaseMixedSourceEstimate(_BaseSourceEstimate):
_src_type = 'mixed'
_src_count = None
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None): # noqa: D102
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError('Vertices must be a list of numpy arrays with '
'one array per source space.')
super().__init__(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@property
def _n_surf_vert(self):
return sum(len(v) for v in self.vertices[:2])
def surface(self):
"""Return the cortical surface source estimate.
Returns
-------
stc : instance of SourceEstimate or VectorSourceEstimate
The surface source estimate.
"""
if self._data_ndim == 3:
klass = VectorSourceEstimate
else:
klass = SourceEstimate
return klass(
self.data[:self._n_surf_vert], self.vertices[:2],
self.tmin, self.tstep, self.subject, self.verbose)
def volume(self):
"""Return the volume surface source estimate.
Returns
-------
stc : instance of VolSourceEstimate or VolVectorSourceEstimate
The volume source estimate.
"""
if self._data_ndim == 3:
klass = VolVectorSourceEstimate
else:
klass = VolSourceEstimate
return klass(
self.data[self._n_surf_vert:], self.vertices[2:],
self.tmin, self.tstep, self.subject, self.verbose)
@fill_doc
class MixedSourceEstimate(_BaseMixedSourceEstimate):
"""Container for mixed surface and volume source estimates.
Parameters
----------
data : array of shape (n_dipoles, n_times) | tuple, shape (2,)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to ``np.dot(kernel, sens_data)``.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
See Also
--------
SourceEstimate : A container for surface source estimates.
VectorSourceEstimate : A container for vector source estimates.
VolSourceEstimate : A container for volume source estimates.
VolVectorSourceEstimate : A container for volume vector source estimates.
Notes
-----
.. versionadded:: 0.9.0
"""
@fill_doc
class MixedVectorSourceEstimate(_BaseVectorSourceEstimate,
_BaseMixedSourceEstimate):
"""Container for volume source estimates.
Parameters
----------
data : array, shape (n_dipoles, 3, n_times)
The data in source space. Each dipole contains three vectors that
denote the dipole strength in X, Y and Z directions over time.
vertices : list of array, shape (n_src,)
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
%(verbose)s
Attributes
----------
subject : str | None
The subject name.
times : array, shape (n_times,)
The time vector.
vertices : list of array
Vertex numbers corresponding to the data. The list contains arrays
with one array per source space.
data : array of shape (n_dipoles, 3, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, 3, n_times).
See Also
--------
MixedSourceEstimate : A container for mixed surface + volume source
estimates.
Notes
-----
.. versionadded:: 0.21.0
"""
_scalar_class = MixedSourceEstimate
###############################################################################
# Morphing
def _get_vol_mask(src):
"""Get the volume source space mask."""
assert len(src) == 1 # not a mixed source space
shape = src[0]['shape'][::-1]
mask = np.zeros(shape, bool)
mask.flat[src[0]['vertno']] = True
return mask
def _spatio_temporal_src_adjacency_vol(src, n_times):
from sklearn.feature_extraction import grid_to_graph
mask = _get_vol_mask(src)
edges = grid_to_graph(*mask.shape, mask=mask)
adjacency = _get_adjacency_from_edges(edges, n_times)
return adjacency
def _spatio_temporal_src_adjacency_surf(src, n_times):
if src[0]['use_tris'] is None:
# XXX It would be nice to support non-ico source spaces too...
raise RuntimeError("The source space does not appear to be an ico "
"surface. adjacency cannot be extracted from"
" non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]
tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off
for u_v, s, off in zip(used_verts, src, offs)])
adjacency = spatio_temporal_tris_adjacency(tris, n_times)
# deal with source space only using a subset of vertices
masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:
raise ValueError('Used vertices do not match adjacency shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warn('%0.1f%% of original source space vertices have been'
' omitted, tri-based adjacency will have holes.\n'
'Consider using distance-based adjacency or '
'morphing data to all source space vertices.' % missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
adjacency = adjacency.tocsr()
adjacency = adjacency[masks]
adjacency = adjacency[:, masks]
# return to original format
adjacency = adjacency.tocoo()
return adjacency
@verbose
def spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):
"""Compute adjacency for a source space activation over time.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
source space, the first N nodes in the graph are the
vertices at time 1, the nodes from N+1 to 2N are the vertices
at time 2, etc.
"""
# XXX we should compute adjacency for each source space and then
# use scipy.sparse.block_diag to concatenate them
if src[0]['type'] == 'vol':
if dist is not None:
raise ValueError('dist must be None for a volume '
'source space. Got %s.' % dist)
adjacency = _spatio_temporal_src_adjacency_vol(src, n_times)
elif dist is not None:
# use distances computed and saved in the source space file
adjacency = spatio_temporal_dist_adjacency(src, n_times, dist)
else:
adjacency = _spatio_temporal_src_adjacency_surf(src, n_times)
return adjacency
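# Illustrative usage sketch, assuming ``src`` and an ``stc`` from the same
# forward model: this adjacency is the structure that the spatio-temporal
# cluster tests in mne.stats expect:
#
#     adjacency = spatio_temporal_src_adjacency(src, n_times=stc.data.shape[1])
#     # pass ``adjacency`` to, e.g., spatio_temporal_cluster_1samp_test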
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade.
Parameters
----------
grade : int
Grade of an icosahedral mesh.
%(verbose)s
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_adjacency.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False,
verbose=None):
"""Compute adjacency from triangles and time instants.
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
source space, the first N nodes in the graph are the
vertices at time 1, the nodes from N+1 to 2N are the vertices
at time 2, etc.
"""
if remap_vertices:
logger.info('Reassigning vertex indices.')
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris)
edges = (edges + sparse.eye(edges.shape[0], format='csr')).tocoo()
return _get_adjacency_from_edges(edges, n_times)
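# Minimal self-contained sketch: two triangles sharing an edge give four
# vertices; with n_times=2 the temporal edges link each vertex at time t to
# itself at time t + 1, so the result is an 8 x 8 COO matrix:
#
#     import numpy as np
#     tris = np.array([[0, 1, 2], [1, 2, 3]])
#     adj = spatio_temporal_tris_adjacency(tris, n_times=2)
#     assert adj.shape == (8, 8)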
@verbose
def spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):
"""Compute adjacency from distances in a source space and time instants.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
n_times : int
Number of time points.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
source space, the first N nodes in the graph are the
vertices at time 1, the nodes from N+1 to 2N are the vertices
at time 2, etc.
"""
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using '
'setup_source_space with add_dist=True')
blocks = [s['dist'][s['vertno'], :][:, s['vertno']] for s in src]
# Ensure we keep explicit zeros; deal with changes in SciPy
for block in blocks:
if isinstance(block, np.ndarray):
block[block == 0] = -np.inf
else:
block.data[block.data == 0] = -1  # assignment, not a no-op comparison
edges = sparse_block_diag(blocks)
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_adjacency_from_edges(edges, n_times)
@verbose
def spatial_src_adjacency(src, dist=None, verbose=None):
"""Compute adjacency for a source space activation.
Parameters
----------
src : instance of SourceSpaces
The source space. It can be a surface source space or a
volume source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_src_adjacency(src, 1, dist)
@verbose
def spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):
"""Compute adjacency from triangles.
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_adjacency(tris, 1, remap_vertices)
@verbose
def spatial_dist_adjacency(src, dist, verbose=None):
"""Compute adjacency from distances in a source space.
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained
with a call to :func:`mne.setup_source_space` with the
``add_dist=True`` option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_adjacency(src, 1, dist)
@verbose
def spatial_inter_hemi_adjacency(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere.
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
%(verbose)s
Returns
-------
adjacency : ~scipy.sparse.coo_matrix
The adjacency matrix describing the spatial graph structure.
Typically this should be combined (additively) with another
existing intra-hemispheric adjacency matrix, e.g. computed
using geodesic distances.
"""
from scipy.spatial.distance import cdist
src = _ensure_src(src, kind='surface')
adj = cdist(src[0]['rr'][src[0]['vertno']],
src[1]['rr'][src[1]['vertno']])
adj = sparse.csr_matrix(adj <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in adj.shape]
adj = sparse.vstack([sparse.hstack([empties[0], adj]),
sparse.hstack([adj.T, empties[1]])])
return adj
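# Illustrative sketch of the combination suggested in the docstring, assuming
# a surface ``src`` with distances in meters:
#
#     adjacency = (spatial_src_adjacency(src) +
#                  spatial_inter_hemi_adjacency(src, dist=0.02))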
@verbose
def _get_adjacency_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create adjacency matrix."""
n_vertices = edges.shape[0]
logger.info("-- number of adjacent vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz))
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
dtype=np.int64)
adjacency = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices,) * 2)
return adjacency
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
def _pca_flip(flip, data):
U, s, V = linalg.svd(data, full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(data))
return sign * scale * V[0]
_label_funcs = {
'mean': lambda flip, data: np.mean(data, axis=0),
'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),
'max': lambda flip, data: np.max(np.abs(data), axis=0),
'pca_flip': _pca_flip,
}
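# Sketch of what 'mean_flip' buys over 'mean': anti-aligned sources are
# sign-flipped before averaging so they do not cancel:
#
#     import numpy as np
#     data = np.array([[1., 2.], [-1., -2.]])  # two anti-aligned time series
#     flip = np.array([[1.], [-1.]])           # as label_sign_flip would give
#     _label_funcs['mean'](flip, data)         # -> array([0., 0.])
#     _label_funcs['mean_flip'](flip, data)    # -> array([1., 2.])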
@contextlib.contextmanager
def _temporary_vertices(src, vertices):
orig_vertices = [s['vertno'] for s in src]
for s, v in zip(src, vertices):
s['vertno'] = v
try:
yield
finally:
for s, v in zip(src, orig_vertices):
s['vertno'] = v
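# Usage sketch (see _prepare_label_extraction below): narrow the source space
# to the stc's vertices so that label_sign_flip picks the matching normals,
# with the original 'vertno' restored on exit even if an error is raised:
#
#     with _temporary_vertices(src, stc.vertices):
#         flip = label_sign_flip(label, src[:2])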
def _check_stc_src(stc, src):
if stc is not None and src is not None:
for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):
n_missing = (~np.in1d(v, s['vertno'])).sum()
if n_missing:
raise ValueError('%d/%d %s hemisphere stc vertices '
'missing from the source space, likely '
'mismatch' % (n_missing, len(v), hemi))
def _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):
"""Prepare indices and flips for extract_label_time_course."""
# If src is a mixed src space, the first 2 src spaces are surf type and
# the other ones are vol type. For mixed source space n_labels will be
# given by the number of ROIs of the cortical parcellation plus the number
# of vol src space.
# If stc=None (i.e. no activation time courses provided) and mode='mean',
# only computes vertex indices and label_flip will be list of None.
from .label import label_sign_flip, Label, BiHemiLabel
# if source estimate provided in stc, get vertices from source space and
# check that they are the same as in the stcs
_check_stc_src(stc, src)
vertno = [s['vertno'] for s in src] if stc is None else stc.vertices
nvert = [len(vn) for vn in vertno]
# initialization
label_flip = list()
label_vertidx = list()
bad_labels = list()
for li, label in enumerate(labels):
if use_sparse:
assert isinstance(label, dict)
vertidx = label['csr']
# This can happen if some labels aren't present in the space
if vertidx.shape[0] == 0:
bad_labels.append(label['name'])
vertidx = None
# Efficiency shortcut: use linearity early to avoid redundant
# calculations
elif mode == 'mean':
vertidx = sparse.csr_matrix(vertidx.mean(axis=0))
label_vertidx.append(vertidx)
label_flip.append(None)
continue
# standard case
_validate_type(label, (Label, BiHemiLabel), 'labels[%d]' % (li,))
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertices = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertices)
elif slabel.hemi == 'rh':
this_vertices = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
this_flip = None
if len(this_vertidx) == 0:
bad_labels.append(label.name)
this_vertidx = None # to later check if label is empty
elif mode not in ('mean', 'max'): # mode-dependent initialization
# label_sign_flip uses two properties:
#
# - src[ii]['nn']
# - src[ii]['vertno']
#
# So if we override vertno with the stc vertices, it will pick
# the correct normals.
with _temporary_vertices(src, stc.vertices):
this_flip = label_sign_flip(label, src[:2])[:, None]
label_vertidx.append(this_vertidx)
label_flip.append(this_flip)
if len(bad_labels):
msg = ('source space does not contain any vertices for %d label%s:\n%s'
% (len(bad_labels), _pl(bad_labels), bad_labels))
if not allow_empty:
raise ValueError(msg)
else:
msg += '\nAssigning all-zero time series.'
if allow_empty == 'ignore':
logger.info(msg)
else:
warn(msg)
return label_vertidx, label_flip
def _vol_src_rr(src):
return apply_trans(
src[0]['src_mri_t'], np.array(
[d.ravel(order='F')
for d in np.meshgrid(
*(np.arange(s) for s in src[0]['shape']),
indexing='ij')],
float).T)
def _volume_labels(src, labels, mri_resolution):
# This will create Label objects that should do the right thing for our
# given volumetric source space when used with extract_label_time_course
from .label import Label
assert src.kind == 'volume'
extra = ' when using a volume source space'
_import_nibabel('use volume atlas labels')
_validate_type(labels, ('path-like', list, tuple), 'labels' + extra)
if _check_path_like(labels):
mri = labels
infer_labels = True
else:
if len(labels) != 2:
raise ValueError('labels, if list or tuple, must have length 2, '
'got %s' % (len(labels),))
mri, labels = labels
infer_labels = False
_validate_type(mri, 'path-like', 'labels[0]' + extra)
logger.info('Reading atlas %s' % (mri,))
vol_info = _get_mri_info_data(str(mri), data=True)
atlas_data = vol_info['data']
atlas_values = np.unique(atlas_data)
if atlas_values.dtype.kind == 'f': # MGZ will be 'i'
atlas_values = atlas_values[np.isfinite(atlas_values)]
if not (atlas_values == np.round(atlas_values)).all():
raise RuntimeError('Non-integer values present in atlas, cannot '
'labelize')
atlas_values = np.round(atlas_values).astype(np.int64)
if infer_labels:
labels = {
k: v for k, v in read_freesurfer_lut()[0].items()
if v in atlas_values}
labels = _check_volume_labels(labels, mri, name='labels[1]')
assert isinstance(labels, dict)
del atlas_values
vox_mri_t = vol_info['vox_mri_t']
want = src[0].get('vox_mri_t', None)
if want is None:
raise RuntimeError(
'Cannot use volumetric atlas if no mri was supplied during '
'source space creation')
vox_mri_t, want = vox_mri_t['trans'], want['trans']
if not np.allclose(vox_mri_t, want, atol=1e-6):
raise RuntimeError(
'atlas vox_mri_t does not match that used to create the source '
'space')
src_shape = tuple(src[0]['mri_' + k] for k in ('width', 'height', 'depth'))
atlas_shape = atlas_data.shape
if atlas_shape != src_shape:
raise RuntimeError('atlas shape %s does not match source space MRI '
'shape %s' % (atlas_shape, src_shape))
atlas_data = atlas_data.ravel(order='F')
if mri_resolution:
# Upsample then just index
out_labels = list()
nnz = 0
interp = src[0]['interpolator']
# should be guaranteed by size checks above and our src interp code
assert interp.shape[0] == np.prod(src_shape)
assert interp.shape == (atlas_data.size, len(src[0]['rr']))
interp = interp[:, src[0]['vertno']]
for k, v in labels.items():
mask = atlas_data == v
csr = interp[mask]
out_labels.append(dict(csr=csr, name=k))
nnz += csr.shape[0] > 0
else:
# Use nearest values
vertno = src[0]['vertno']
rr = _vol_src_rr(src)
del src
src_values = _get_atlas_values(vol_info, rr[vertno])
vertices = [vertno[src_values == val] for val in labels.values()]
out_labels = [Label(v, hemi='lh', name=val)
for v, val in zip(vertices, labels.keys())]
nnz = sum(len(v) != 0 for v in vertices)
logger.info('%d/%d atlas regions had at least one vertex '
'in the source space' % (nnz, len(out_labels)))
return out_labels
def _dep_trans(trans):
if trans is not None:
warn('trans is no longer needed and will be removed in 0.23, do not '
'pass it as an argument', DeprecationWarning)
def _get_default_label_modes():
return sorted(_label_funcs.keys()) + ['auto']
def _get_allowed_label_modes(stc):
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
return ('mean', 'max', 'auto')
else:
return _get_default_label_modes()
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
allow_empty=False, trans=None,
mri_resolution=True, verbose=None):
# loop through source estimates and extract time series
if src is None and mode in ['mean', 'max']:
kind = 'surface'
else:
_validate_type(src, SourceSpaces)
kind = src.kind
_dep_trans(trans)
_check_option('mode', mode, _get_default_label_modes())
if kind in ('surface', 'mixed'):
if not isinstance(labels, list):
labels = [labels]
use_sparse = False
else:
labels = _volume_labels(src, labels, mri_resolution)
use_sparse = bool(mri_resolution)
n_mode = len(labels) # how many processed with the given mode
n_mean = len(src[2:]) if kind == 'mixed' else 0
n_labels = n_mode + n_mean
vertno = func = None
for si, stc in enumerate(stcs):
_validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,),
'source estimate')
_check_option(
'mode', mode, _get_allowed_label_modes(stc),
'when using a vector and/or volume source estimate')
if isinstance(stc, (_BaseVolSourceEstimate,
_BaseVectorSourceEstimate)):
mode = 'mean' if mode == 'auto' else mode
else:
mode = 'mean_flip' if mode == 'auto' else mode
if vertno is None:
vertno = copy.deepcopy(stc.vertices) # avoid keeping a ref
nvert = np.array([len(v) for v in vertno])
label_vertidx, src_flip = _prepare_label_extraction(
stc, labels, src, mode, allow_empty, use_sparse)
func = _label_funcs[mode]
# make sure the stc is compatible with the source space
if len(vertno) != len(stc.vertices):
raise ValueError('stc not compatible with source space')
for vn, svn in zip(vertno, stc.vertices):
if len(vn) != len(svn):
raise ValueError('stc not compatible with source space. '
'stc has %s time series but there are %s '
'vertices in source space. Ensure you used '
'src from the forward or inverse operator, '
'as forward computation can exclude vertices.'
% (len(svn), len(vn)))
if not np.array_equal(svn, vn):
raise ValueError('stc not compatible with source space')
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels,) + stc.data.shape[1:],
dtype=stc.data.dtype)
for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):
if vertidx is not None:
if isinstance(vertidx, sparse.csr_matrix):
assert mri_resolution
assert vertidx.shape[1] == stc.data.shape[0]
this_data = np.reshape(stc.data, (stc.data.shape[0], -1))
this_data = vertidx @ this_data
this_data.shape = \
(this_data.shape[0],) + stc.data.shape[1:]
else:
this_data = stc.data[vertidx]
label_tc[i] = func(flip, this_data)
# extract label time series for the vol src space (only mean supported)
offset = nvert[:-n_mean].sum() # effectively :2 or :0
for i, nv in enumerate(nvert[2:]):
if nv != 0:
v2 = offset + nv
label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)
offset = v2
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='auto',
allow_empty=False, return_generator=False,
*, trans=None, mri_resolution=True,
verbose=None):
"""Extract label time course for lists of labels and source estimates.
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter (see Notes).
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
%(eltc_labels)s
%(eltc_src)s
%(eltc_mode)s
%(eltc_allow_empty)s
return_generator : bool
If True, a generator instead of a list is returned.
%(trans_deprecated)s
%(eltc_mri_resolution)s
%(verbose)s
Returns
-------
%(eltc_returns)s
Notes
-----
%(eltc_mode_notes)s
If encountering a ``ValueError`` due to mismatch between number of
source points in the subject source space and computed ``stc`` object set
``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source
space is the one actually used by the inverse to compute the source
time courses.
"""
# convert inputs to lists
if not isinstance(stcs, (list, tuple, GeneratorType)):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
label_tc = _gen_extract_label_time_course(
stcs, labels, src, mode=mode, allow_empty=allow_empty,
trans=trans, mri_resolution=mri_resolution)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
# input was a single SourceEstimate, return single array
label_tc = label_tc[0]
return label_tc
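# Illustrative usage sketch, assuming ``stcs`` from apply_inverse_epochs,
# ``labels`` from mne.read_labels_from_annot, and ``src`` taken from the
# inverse operator as recommended in the Notes above:
#
#     label_ts = extract_label_time_course(stcs, labels, inv['src'],
#                                          mode='mean_flip',
#                                          return_generator=True)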
@verbose
def stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum',
project=True, subjects_dir=None, src=None, verbose=None):
"""Create a STC from ECoG and sEEG sensor data.
Parameters
----------
evoked : instance of Evoked
The evoked data. Must contain ECoG or sEEG channels.
%(trans)s
subject : str
The subject name.
distance : float
Distance (m) defining the activation "ball" of the sensor.
mode : str
Can be "sum" to do a linear sum of weights, "nearest" to
use only the weight of the nearest sensor, or "zero" to use a
zero-order hold. See Notes.
project : bool
If True, project the electrodes to the nearest ``pial`` surface
vertex before computing distances. Only used when doing a
surface projection.
%(subjects_dir)s
src : instance of SourceSpaces
The source space.
.. warning:: If a surface source space is used, make sure that
``surf='pial'`` was used during construction.
%(verbose)s
Returns
-------
stc : instance of SourceEstimate
The surface source estimate. If src is None, a surface source
estimate will be produced, and the number of vertices will equal
the number of pial-surface vertices that were close enough to
the sensors to take on a non-zero value. If src is not None,
a surface, volume, or mixed source estimate will be produced
(depending on the kind of source space passed) and the
vertices will match those of src (i.e., there may be
many all-zero values in stc.data).
Notes
-----
For surface projections, this function projects the ECoG sensors to
the pial surface (if ``project``), then the activation at each pial
surface vertex is given by the mode:
- ``'sum'``
Activation is the sum across each sensor weighted by the fractional
``distance`` from each sensor. A sensor with zero distance gets weight
1 and a sensor at ``distance`` meters away (or larger) gets weight 0.
If ``distance`` is less than the distance between any two electrodes,
this will be the same as ``'nearest'``.
- ``'single'``
Same as ``'sum'`` except that only the nearest electrode is used,
rather than summing across electrodes within the ``distance`` radius.
Same as ``'nearest'`` for vertices with distance zero to the projected
sensor.
- ``'nearest'``
The value is given by the value of the nearest sensor, up to a
``distance`` (beyond which it is zero).
If creating a Volume STC, ``src`` must be passed in, and this
function will project sEEG sensors to nearby surrounding vertices.
Then the activation at each volume vertex is given by the mode
in the same way as ECoG surface projections.
.. versionadded:: 0.22
"""
from scipy.spatial.distance import cdist, pdist
from .evoked import Evoked
_validate_type(evoked, Evoked, 'evoked')
_validate_type(mode, str, 'mode')
_validate_type(src, (None, SourceSpaces), 'src')
_check_option('mode', mode, ('sum', 'single', 'nearest'))
# create a copy of Evoked using ecog and seeg
evoked = evoked.copy().pick_types(ecog=True, seeg=True)
# get channel positions that will be used to pinpoint where
in the source space we will use the evoked data
pos = evoked._get_channel_positions()
# remove nan channels
nan_inds = np.where(np.isnan(pos).any(axis=1))[0]
nan_chs = [evoked.ch_names[idx] for idx in nan_inds]
evoked.drop_channels(nan_chs)
pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds]
# coord_frame transformation from native mne "head" to MRI coord_frame
trans, _ = _get_trans(trans, 'head', 'mri', allow_none=True)
# convert head positions -> coord_frame MRI
pos = apply_trans(trans, pos)
subject = _check_subject(None, subject, False)
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if src is None: # fake a full surface one
rrs = [read_surface(op.join(subjects_dir, subject,
'surf', f'{hemi}.pial'))[0]
for hemi in ('lh', 'rh')]
src = SourceSpaces([
dict(rr=rr / 1000., vertno=np.arange(len(rr)), type='surf',
coord_frame=FIFF.FIFFV_COORD_MRI)
for rr in rrs])
del rrs
keep_all = False
else:
keep_all = True
# ensure it's a usable one
klass = dict(
surface=SourceEstimate,
volume=VolSourceEstimate,
mixed=MixedSourceEstimate,
)
_check_option('src.kind', src.kind, sorted(klass.keys()))
klass = klass[src.kind]
rrs = np.concatenate([s['rr'][s['vertno']] for s in src])
if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:
rrs = apply_trans(trans, rrs)
# projection will only occur with surfaces
logger.info(
f'Projecting data from {len(pos)} sensor{_pl(pos)} onto {len(rrs)} '
f'{src.kind} vertices: {mode} mode')
if project and src.kind == 'surface':
logger.info(' Projecting electrodes onto surface')
pos = _project_onto_surface(pos, dict(rr=rrs), project_rrs=True,
method='nearest')[2]
min_dist = pdist(pos).min() * 1000
logger.info(
f' Minimum {"projected " if project else ""}intra-sensor distance: '
f'{min_dist:0.1f} mm')
# compute pairwise distance between source space points and sensors
dists = cdist(rrs, pos)
assert dists.shape == (len(rrs), len(pos))
# only consider vertices within our "epsilon-ball"
# characterized by distance kwarg
vertices = np.where((dists <= distance).any(-1))[0]
logger.info(f' {len(vertices)} / {len(rrs)} non-zero vertices')
w = np.maximum(1. - dists[vertices] / distance, 0)
# now we triage based on mode
if mode in ('single', 'nearest'):
range_ = np.arange(w.shape[0])
idx = np.argmax(w, axis=1)
vals = w[range_, idx] if mode == 'single' else 1.
w.fill(0)
w[range_, idx] = vals
missing = np.where(~np.any(w, axis=0))[0]
if len(missing):
warn(f'Channel{_pl(missing)} missing in STC: '
f'{", ".join(evoked.ch_names[mi] for mi in missing)}')
nz_data = w @ evoked.data
if not keep_all:
assert src.kind == 'surface'
data = nz_data
offset = len(src[0]['vertno'])
vertices = [vertices[vertices < offset],
vertices[vertices >= offset] - offset]
else:
data = np.zeros(
(sum(len(s['vertno']) for s in src), len(evoked.times)),
dtype=nz_data.dtype)
data[vertices] = nz_data
vertices = [s['vertno'].copy() for s in src]
return klass(data, vertices, evoked.times[0], 1. / evoked.info['sfreq'],
subject=subject, verbose=verbose)
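# Illustrative usage sketch for intracranial data; ``evoked``, ``trans`` and
# the FreeSurfer subject name are assumed to exist:
#
#     stc = stc_near_sensors(evoked, trans, 'sample', mode='nearest',
#                            distance=0.02)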
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/indexes/interval/test_interval.py | 1 | 52262 | from __future__ import division
from itertools import permutations
import re
import numpy as np
import pytest
from pandas.compat import lzip
import pandas as pd
from pandas import (
Index, Interval, IntervalIndex, Timedelta, Timestamp, date_range,
interval_range, isna, notna, timedelta_range)
import pandas.core.common as com
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
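# For orientation (a sketch, not a test): create_index(closed='right') is the
# ten-interval index (0, 1], (1, 2], ..., (9, 10], and create_index_with_nan
# replaces its second interval with NaN via the masked left/right arrays:
#
#     IntervalIndex.from_breaks(range(11))  # -> [(0, 1], (1, 2], ..., (9, 10]]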
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
@pytest.mark.parametrize('breaks', [
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(['20170101', '20170202', '20170303', '20170404']),
pd.to_timedelta(['1ns', '2ms', '3s', '4M', '5H', '6D'])])
def test_length(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.length
expected = Index(iv.length for iv in index)
tm.assert_index_equal(result, expected)
# with NA
index = index.insert(1, np.nan)
result = index.length
expected = Index(iv.length if notna(iv) else iv for iv in index)
tm.assert_index_equal(result, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert index.hasnans is False
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans is True
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex(index._ndarray_values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right')])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with pytest.raises(ValueError, match=msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with pytest.raises(ValueError, match=msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_is_unique_interval(self, closed):
"""
Interval specific tests for is_unique in addition to base class tests
"""
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique is True
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique is True
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique is True
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_frame_repr(self):
# https://github.com/pandas-dev/pandas/pull/24134/files
df = pd.DataFrame({'A': [1, 2, 3, 4]},
index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4]))
result = repr(df)
expected = (
' A\n'
'(0, 1] 1\n'
'(1, 2] 2\n'
'(2, 3] 3\n'
'(3, 4] 4'
)
assert result == expected
# TODO: check this behavior is consistent with test_interval_new.py
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_value(self):
with pytest.raises(KeyError, match="^0$"):
self.index.get_loc(0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
with pytest.raises(KeyError, match="^-1$"):
self.index.get_loc(-1)
with pytest.raises(KeyError, match="^3$"):
self.index.get_loc(3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='intp'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='intp'))
assert idx.get_loc(3) == 1
with pytest.raises(KeyError, match=r"^3\.5$"):
idx.get_loc(3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks(breaks, closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
msg = ("'can only get slices from an IntervalIndex if bounds are"
" non-overlapping and all monotonic increasing or decreasing'")
with pytest.raises(KeyError, match=msg):
index.slice_locs(1, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
msg = r"Interval\(2, 3, closed='right'\)"
with pytest.raises(KeyError, match=msg):
self.index.get_loc(Interval(2, 3))
msg = r"Interval\(-1, 0, closed='left'\)"
with pytest.raises(KeyError, match=msg):
self.index.get_loc(Interval(-1, 0, 'left'))
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [3, Interval(1, 4)])
def test_get_loc_length_one(self, item, closed):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_loc(item)
assert result == 0
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_get_loc_datetimelike_nonoverlapping(self, breaks):
# GH 20636
# nonoverlapping = IntervalIndex method and no i8 conversion
index = IntervalIndex.from_breaks(breaks)
value = index[0].mid
result = index.get_loc(value)
expected = 0
assert result == expected
interval = Interval(index[0].left, index[1].right)
result = index.get_loc(interval)
expected = slice(0, 2)
assert result == expected
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_loc_datetimelike_overlapping(self, arrays):
# GH 20636
# overlapping = IntervalTree method with i8 conversion
index = IntervalIndex.from_arrays(*arrays)
value = index[0].mid + Timedelta('12 hours')
result = np.sort(index.get_loc(value))
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
interval = Interval(index[0].left, index[1].right)
result = np.sort(index.get_loc(interval))
expected = np.array([0, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [
[3], np.arange(1, 5), [Interval(1, 4)], interval_range(1, 4)])
def test_get_indexer_length_one(self, item, closed):
# GH 17284
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_indexer(item)
expected = np.array([0] * len(item), dtype='intp')
tm.assert_numpy_array_equal(result, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_reindexer_datetimelike(self, arrays):
# GH 20636
index = IntervalIndex.from_arrays(*arrays)
tuples = [(index[0].left, index[0].left + pd.Timedelta('12H')),
(index[-1].right - pd.Timedelta('12H'), index[-1].right)]
target = IntervalIndex.from_tuples(tuples)
result = index._get_reindexer(target)
expected = np.array([0, 3], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_maybe_convert_i8(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
# intervalindex
result = index._maybe_convert_i8(index)
expected = IntervalIndex.from_breaks(breaks.asi8)
tm.assert_index_equal(result, expected)
# interval
interval = Interval(breaks[0], breaks[1])
result = index._maybe_convert_i8(interval)
expected = Interval(breaks[0].value, breaks[1].value)
assert result == expected
# datetimelike index
result = index._maybe_convert_i8(breaks)
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
# datetimelike scalar
result = index._maybe_convert_i8(breaks[0])
expected = breaks[0].value
assert result == expected
# list-like of datetimelike scalars
result = index._maybe_convert_i8(list(breaks))
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('2018-01-01', periods=5),
timedelta_range('0 days', periods=5)])
def test_maybe_convert_i8_nat(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
to_convert = breaks._constructor([pd.NaT] * 3)
expected = pd.Float64Index([np.nan] * 3)
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
to_convert = to_convert.insert(0, breaks[0])
expected = expected.insert(0, float(breaks[0].value))
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
np.arange(5, dtype='int64'),
np.arange(5, dtype='float64')], ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_numeric(self, breaks, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
key = make_key(breaks)
# no conversion occurs for numeric
result = index._maybe_convert_i8(key)
assert result is key
@pytest.mark.parametrize('breaks1, breaks2', permutations([
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], 2), ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks1)
key = make_key(breaks2)
msg = ('Cannot index an IntervalIndex of subtype {dtype1} with '
'values of dtype {dtype2}')
msg = re.escape(msg.format(dtype1=breaks1.dtype, dtype2=breaks2.dtype))
with pytest.raises(ValueError, match=msg):
index._maybe_convert_i8(key)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains_method(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
# TODO: check this behavior is consistent with test_interval_new.py
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
@pytest.mark.parametrize("sort", [None, False])
def test_union(self, closed, sort):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index[::-1].union(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
result = other[::-1].union(index, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
tm.assert_index_equal(index.union(index, sort=sort), index)
tm.assert_index_equal(index.union(index[:1], sort=sort), index)
# GH 19101: empty result, same dtype
index = IntervalIndex(np.array([], dtype='int64'), closed=closed)
result = index.union(index, sort=sort)
tm.assert_index_equal(result, index)
# GH 19101: empty result, different dtypes
other = IntervalIndex(np.array([], dtype='float64'), closed=closed)
result = index.union(other, sort=sort)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("sort", [None, False])
def test_intersection(self, closed, sort):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index[::-1].intersection(other, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
result = other[::-1].intersection(index, sort=sort)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
tm.assert_index_equal(index.intersection(index, sort=sort), index)
# GH 19101: empty result, same dtype
other = IntervalIndex.from_breaks(range(300, 314), closed=closed)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
breaks = np.arange(300, 314, dtype='float64')
other = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.intersection(other, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_difference(self, closed, sort):
index = IntervalIndex.from_arrays([1, 0, 3, 2],
[1, 2, 3, 4],
closed=closed)
result = index.difference(index[:1], sort=sort)
expected = index[1:]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# GH 19101: empty result, same dtype
result = index.difference(index, sort=sort)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
tm.assert_index_equal(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(index.left.astype('float64'),
index.right, closed=closed)
result = index.difference(other, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("sort", [None, False])
def test_symmetric_difference(self, closed, sort):
index = self.create_index(closed=closed)
result = index[1:].symmetric_difference(index[:-1], sort=sort)
expected = IntervalIndex([index[0], index[-1]])
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
# GH 19101: empty result, same dtype
result = index.symmetric_difference(index, sort=sort)
expected = IntervalIndex(np.array([], dtype='int64'), closed=closed)
if sort is None:
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
# GH 19101: empty result, different dtypes
other = IntervalIndex.from_arrays(index.left.astype('float64'),
index.right, closed=closed)
result = index.symmetric_difference(other, sort=sort)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
@pytest.mark.parametrize("sort", [None, False])
def test_set_operation_errors(self, closed, op_name, sort):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# non-IntervalIndex
msg = ('the other index needs to be an IntervalIndex too, but '
'was type Int64Index')
with pytest.raises(TypeError, match=msg):
set_op(Index([1, 2, 3]), sort=sort)
# mixed closed
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with pytest.raises(ValueError, match=msg):
set_op(other, sort=sort)
# GH 19016: incompatible dtypes
other = interval_range(Timestamp('20180101'), periods=9, closed=closed)
msg = ('can only do {op} between two IntervalIndex objects that have '
'compatible dtypes').format(op=op_name)
with pytest.raises(TypeError, match=msg):
set_op(other, sort=sort)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with pytest.raises(TypeError, match='unorderable types'):
self.index > 0
with pytest.raises(TypeError, match='unorderable types'):
self.index <= 0
msg = r"unorderable types: Interval\(\) > int\(\)"
with pytest.raises(TypeError, match=msg):
self.index > np.arange(2)
msg = "Lengths must match to compare"
with pytest.raises(ValueError, match=msg):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
msg = ("missing values must be missing in the same location both left"
" and right sides")
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_datetime(self, tz):
start = Timestamp('2000-01-01', tz=tz)
dates = date_range(start=start, periods=10)
index = IntervalIndex.from_breaks(dates)
# test mid
start = Timestamp('2000-01-01T12:00', tz=tz)
expected = date_range(start=start, periods=9)
tm.assert_index_equal(index.mid, expected)
# __contains__ doesn't check individual points
assert Timestamp('2000-01-01', tz=tz) not in index
assert Timestamp('2000-01-01T12', tz=tz) not in index
assert Timestamp('2000-01-02', tz=tz) not in index
iv_true = Interval(Timestamp('2000-01-01T08', tz=tz),
Timestamp('2000-01-01T18', tz=tz))
iv_false = Interval(Timestamp('1999-12-31', tz=tz),
Timestamp('2000-01-01', tz=tz))
assert iv_true in index
assert iv_false not in index
# .contains does check individual points
assert not index.contains(Timestamp('2000-01-01', tz=tz))
assert index.contains(Timestamp('2000-01-01T12', tz=tz))
assert index.contains(Timestamp('2000-01-02', tz=tz))
assert index.contains(iv_true)
assert not index.contains(iv_false)
# test get_indexer
start = Timestamp('1999-12-31T12:00', tz=tz)
target = date_range(start=start, periods=7, freq='12H')
actual = index.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
start = Timestamp('2000-01-08T18:00', tz=tz)
target = date_range(start=start, periods=7, freq='6H')
actual = index.get_indexer(target)
expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with pytest.raises(ValueError, match=msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
@pytest.mark.parametrize('start, shift, na_value', [
(0, 1, np.nan),
(Timestamp('2018-01-01'), Timedelta('1 day'), pd.NaT),
(Timedelta('0 days'), Timedelta('1 day'), pd.NaT)])
def test_is_overlapping(self, start, shift, na_value, closed):
# GH 23309
# see test_interval_tree.py for extensive tests; interface tests here
# non-overlapping
tuples = [(start + n * shift, start + (n + 1) * shift)
for n in (0, 2, 4)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# non-overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# overlapping
tuples = [(start + n * shift, start + (n + 2) * shift)
for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# common endpoints
tuples = [(start + n * shift, start + (n + 1) * shift)
for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
expected = closed == 'both'
assert result is expected
# common endpoints with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
assert result is expected
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)),
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)),
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10))])
def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
expected = Index(com.asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tuples', [
lzip(range(10), range(1, 11)) + [np.nan],
lzip(date_range('20170101', periods=10),
date_range('20170101', periods=10)) + [np.nan],
lzip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10)) + [np.nan]])
@pytest.mark.parametrize('na_tuple', [True, False])
def test_to_tuples_na(self, tuples, na_tuple):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
expected_notna = Index(com.asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
# check the NA portion
result_na = result[-1]
if na_tuple:
assert isinstance(result_na, tuple)
assert len(result_na) == 2
assert all(isna(x) for x in result_na)
else:
assert isna(result_na)
def test_nbytes(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
result = IntervalIndex.from_arrays(left, right).nbytes
expected = 64 # 4 * 8 * 2
assert result == expected
def test_itemsize(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
expected = 16 # 8 * 2
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = IntervalIndex.from_arrays(left, right).itemsize
assert result == expected
@pytest.mark.parametrize('new_closed', [
'left', 'right', 'both', 'neither'])
def test_set_closed(self, name, closed, new_closed):
# GH 21670
index = interval_range(0, 5, closed=closed, name=name)
result = index.set_closed(new_closed)
expected = interval_range(0, 5, closed=new_closed, name=name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('bad_closed', ['foo', 10, 'LEFT', True, False])
def test_set_closed_errors(self, bad_closed):
# GH 21670
index = interval_range(0, 5)
msg = "invalid option for 'closed': {closed}".format(closed=bad_closed)
with pytest.raises(ValueError, match=msg):
index.set_closed(bad_closed)
def test_is_all_dates(self):
# GH 23576
year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'))
year_2017_index = pd.IntervalIndex([year_2017])
assert not year_2017_index.is_all_dates
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/scipy/stats/_multivariate.py | 12 | 112182 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from ._discrete_distns import binom
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
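# Editorial sketch (not part of the original source): _squeeze_output turns a
# degenerate result array into a plain scalar, which is what lets the pdf and
# rvs methods below return scalars for single inputs, e.g.
#   >>> _squeeze_output(np.array([[3.0]]))     # -> 3.0 (a numpy scalar)
#   >>> _squeeze_output(np.array([1.0, 2.0]))  # 1-D data is returned unchanged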
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
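# Worked example (editorial, assuming float64 input): the cutoff is
# 1e6 * machine-eps * max(|eigenvalue|), so
#   >>> spectrum = np.array([1.0, 1e-18])
#   >>> eps = _eigvalsh_to_eps(spectrum)   # roughly 2.2e-10 here
#   >>> 1e-18 < eps                        # the tiny eigenvalue counts as zero
#   True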
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
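# Worked example (editorial): entries at or below eps invert to exactly zero,
# which is what makes the downstream product U @ U.T a pseudoinverse:
#   >>> _pinv_1d(np.array([4.0, 0.0, 2.0]))   # -> array([0.25, 0.0, 0.5])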
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
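# A minimal usage sketch for _PSD (editorial, not in the original source).
# For a rank-deficient but positive semidefinite matrix, the rank, the log of
# the pseudo-determinant, and the pseudoinverse all agree on which eigenvalues
# count as zero:
#   >>> psd = _PSD(np.diag([2.0, 0.0]))
#   >>> psd.rank                               # only one positive eigenvalue
#   1
#   >>> np.allclose(psd.log_pdet, np.log(2.0))
#   True
#   >>> np.allclose(psd.pinv, np.diag([0.5, 0.0]))
#   True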
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
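# Behaviour sketch (editorial): with no explicit dimension, shape information
# flows from whichever parameter carries it, e.g.
#   >>> g = multivariate_normal_gen()
#   >>> dim, mean, cov = g._process_parameters(None, None, [[1., 0.], [0., 2.]])
#   >>> dim, mean.shape, cov.shape             # dim inferred from cov
#   (2, (2,), (2, 2))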
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that the last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
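# Shape sketch (editorial): quantiles are normalised so the last axis indexes
# components, e.g.
#   >>> g = multivariate_normal_gen()
#   >>> g._process_quantiles(1.0, 1).shape          # scalar -> one 1-d point
#   (1,)
#   >>> g._process_quantiles([1., 2., 3.], 1).shape  # three 1-d points
#   (3, 1)
#   >>> g._process_quantiles([1., 2., 3.], 3).shape  # one 3-d point
#   (1, 3)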
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
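# Sanity check (editorial): in one dimension with mean 0 and unit variance,
# rank == 1, log_det_cov == 0 and maha == x**2, so the expression above
# reduces to -0.5*(log(2*pi) + x**2), the familiar log of the standard
# normal density.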
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
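# Worked value (editorial): for the 1-D standard normal this gives
# 0.5*log(2*pi*e) ~= 1.4189, the classic differential entropy of N(0, 1):
#   >>> multivariate_normal_gen().entropy(mean=0.0, cov=1.0)   # ~1.41894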
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""
A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super(matrix_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if 0 in meanshape:
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the"
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the"
"same number of columns.")
else:
mean = np.zeros((numrows,numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that the last two axes label the components
of each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
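# Shape sketch (editorial): a single m x n observation is promoted to a batch
# of one, shape (1, m, n), so _logpdf can reduce over the trailing axes
# uniformly for single samples and batches alike.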
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
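# Editorial note on the computation above: with U = rowcov and V = colcov,
# `maha` evaluates the trace term Tr[U^{-1} (X-M) V^{-1} (X-M)^T] by scaling
# the deviation on both sides with the matrix square roots of the precisions
# (row_prec_rt, col_prec_rt) and summing the squares, avoiding any explicit
# matrix inverse.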
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""
Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
if size == 1:
#out = np.squeeze(out, axis=0)
out = out.reshape(mean.shape)
return out
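# Sampling sketch (editorial): the draw above is M + A Z B^T with
# Z ~ N(0, I_{m x n}), A = chol(rowcov) and B = chol(colcov), the standard
# construction of a matrix normal variate; e.g.
#   >>> X = matrix_normal_gen().rvs(mean=np.zeros((3, 2)), rowcov=np.eye(3),
#   ...                             colcov=0.5 * np.eye(2))
#   >>> X.shape
#   (3, 2)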
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
        B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
    lnB : scalar
        Log of the helper quotient, for internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
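
# Quick sanity sketch (illustrative only, never called at import time): for
# alpha = (1, 1, 1), B(alpha) = Gamma(1)**3 / Gamma(3) = 1/2, so _lnB should
# return -log(2).
def _example_lnB():
    assert np.isclose(_lnB(np.ones(3)), -np.log(2.0))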
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
    Each :math:`\alpha` entry must be positive. The distribution is supported
    only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
        mu : ndarray
            Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
        v : ndarray
            Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
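
# Illustrative sketch (not part of the library API, never called at import
# time): the closed-form moments implemented above, mean = alpha/alpha0 and
# var = alpha*(alpha0 - alpha)/(alpha0**2*(alpha0 + 1)), alpha0 = sum(alpha).
def _example_dirichlet_moments():
    alpha = np.array([0.4, 5.0, 15.0])
    alpha0 = alpha.sum()
    assert np.allclose(dirichlet.mean(alpha), alpha / alpha0)
    assert np.allclose(dirichlet.var(alpha),
                       alpha * (alpha0 - alpha) / (alpha0**2 * (alpha0 + 1)))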
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
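    # e.g. _process_size(3) returns (3, (3,)) and _process_size((2, 2))
    # returns (4, (2, 2)): `n` is the total number of draws, `shape` the
    # leading shape of the output array.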
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
        # components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
        Only valid if the degrees of freedom are greater than or equal to
        the dimension of the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mode : float or None
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim`
            is the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
        Compute Cholesky decomposition and determine ``log(det(scale))``.
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
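
# Illustrative sketch (not part of the library API, never called at import
# time): with a 1-d identity scale the Wishart collapses to chi2, as noted in
# the class docstring, and _cholesky_logdet agrees with np.linalg.slogdet.
# Assumes `chi2` is importable from scipy.stats.
def _example_wishart_checks():
    from scipy.stats import chi2
    x = np.linspace(0.1, 5.0, 7)
    assert np.allclose(wishart.pdf(x, df=3, scale=1), chi2.pdf(x, 3))
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    _, logdet = wishart._cholesky_logdet(scale)
    sign, ref = np.linalg.slogdet(scale)
    assert sign == 1.0 and np.isclose(logdet, ref)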
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
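
# Illustrative sketch (never called at import time): freezing fixes df/scale
# once, caching the Cholesky factor and log-determinant, so a frozen instance
# agrees with the generator call for call.
def _example_wishart_frozen():
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    rv = wishart(df=5, scale=scale)
    assert np.isclose(rv.logpdf(np.eye(2)),
                      wishart.logpdf(np.eye(2), 5, scale))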
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
            raise ValueError('illegal value in %d-th argument of internal'
                             ' potri' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
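
# Illustrative sketch (not part of the library API, never called at import
# time): _cho_inv_batch inverts a stack of SPD matrices stored in the last two
# dimensions, overwriting its argument, so a copy is passed below.
def _example_cho_inv_batch():
    rng = np.random.RandomState(1)
    A = rng.randn(4, 3, 3)
    spd = np.einsum('nij,nkj->nik', A, A) + 3.0 * np.eye(3)  # batch of SPD
    inv = _cho_inv_batch(spd.copy())
    for m, m_inv in zip(spd, inv):
        assert np.allclose(np.dot(m, m_inv), np.eye(3))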
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
Wishart distribution :math:`W_1(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
        logpdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mode : float
            The mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
            Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
            Random variates of shape (`size`) + (`dim`, `dim`), where `dim`
            is the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
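
# Illustrative sketch (never called at import time): two relationships stated
# in the docstrings above, the mean scale/(df - dim - 1) for df > dim + 1, and
# the collapse of a 1-d inverse Wishart to invgamma.  Assumes `invgamma` is
# importable from scipy.stats.
def _example_invwishart_checks():
    from scipy.stats import invgamma
    scale = np.array([[2.0, 0.3], [0.3, 1.0]])
    assert np.allclose(invwishart.mean(df=6, scale=scale), scale / 3.0)
    x = np.linspace(0.1, 1.0, 5)
    assert np.allclose(invwishart.pdf(x, df=6, scale=1),
                       invgamma.pdf(x, 6/2., scale=1./2))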
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
    method_frozen = invwishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""
A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.multinomial : Sampling from the multinomial distribution.
"""
def __init__(self, seed=None):
super(multinomial_gen, self).__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""
Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""
Return: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[...,-1] = 1. - p[...,:-1].sum(axis=-1)
# true for bad p
pcond = np.any(p <= 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
        n = np.array(n, dtype=int, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""
Return: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
        xx = np.asarray(x, dtype=int)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." % (xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""
Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
        xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
        result = self._checkresult(result, xcond_, -np.inf)
        # replace values bad for n or p; broadcast npcond to the right shape
        npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
        return self._checkresult(result, npcond_, np.nan)
def pmf(self, x, n, p):
"""
Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
            Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""
Mean of the Multinomial distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
        mean : ndarray
            The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
        return self._checkresult(result, npcond, np.nan)
def cov(self, n, p):
"""
Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[...,i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""
Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""
Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
        size : integer or iterable of integers, optional
            Number of samples to draw. Default is ``None``, in which case a
            single variate is returned.
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
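
# Illustrative sketch (never called at import time): for k = 2 the multinomial
# pmf reduces to the binomial pmf, as in the class docstring; `binom` is
# already used by `entropy` above, so it is assumed to be in scope.
def _example_multinomial_binom():
    assert np.isclose(multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]),
                      binom.pmf(3, 7, 0.4))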
class multinomial_frozen(multi_rv_frozen):
r"""
Create a frozen Multinomial distribution.
Parameters
----------
    n : int
        Number of trials
    p : array_like
        Probability of a trial falling into each category; should sum to 1
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
class special_ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
    -----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
"""
def __init__(self, seed=None):
super(special_ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
        rvs : ndarray or scalar
            Random rotation matrices; shape (`dim`, `dim`) if ``size == 1``,
            else (`size`, `dim`, `dim`).
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
D = np.ones((dim,))
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
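
# Illustrative sketch (never called at import time): draws from SO(N) should
# be orthogonal with determinant +1 up to floating point error.
def _example_special_ortho_group():
    R = special_ortho_group.rvs(3, random_state=np.random.RandomState(2))
    assert np.allclose(np.dot(R, R.T), np.eye(3))
    assert np.isclose(np.linalg.det(R), 1.0)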
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
    -----
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super(ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
        rvs : ndarray or scalar
            Random orthogonal matrices; shape (`dim`, `dim`) if ``size == 1``,
            else (`size`, `dim`, `dim`).
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0])
x[0] += D*np.sqrt((x*x).sum())
# Householder transformation
Hx = -D*(np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
return H
ortho_group = ortho_group_gen()
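# Illustrative sketch (not part of the original scipy source): unlike
# special_ortho_group, draws from O(N) may have determinant -1 as well as
# +1, and over many Haar draws both signs should appear about equally
# often. Wrapped in a helper so nothing runs at import time.
def _demo_ortho_group_signs(dim=3, n_draws=200, seed=123):  # pragma: no cover
    rng = np.random.RandomState(seed)
    dets = [np.linalg.det(ortho_group.rvs(dim, random_state=rng))
            for _ in range(n_draws)]
    # Every determinant is +/-1 up to roundoff.
    assert np.allclose(np.abs(dets), 1.0)
    # The mean sign should be near 0 for a Haar-distributed sample.
    return np.mean(np.sign(dets))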
class random_correlation_gen(multi_rv_generic):
r"""
A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
-----
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
No. 4, pp. 640-651.
Examples
--------
>>> from scipy.stats import random_correlation
>>> np.random.seed(514)
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))
>>> x
array([[ 1. , -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1. , -0.24351129, 0.06703474],
[ 0.18366501, -0.24351129, 1. , 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super(random_correlation_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 Givens rotation of the form [ c s ; -s c ];
the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
# t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a psd matrix m, rotate to put ones on the diagonal, turning it
into a correlation matrix. This also requires the trace to equal the
dimensionality. Note: modifies the input matrix in place.
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):
raise ValueError("Matrix must be square, C-contiguous, and of dtype float64.")
d = m.shape[0]
for i in range(d-1):
if m[i,i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""
Draw random correlation matrices
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray
Random correlation matrix of dimension (dim, dim), with eigenvalues
eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
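# Illustrative sketch (not part of the original scipy source): the Givens
# rotations applied in _to_corr are similarity transformations, so they
# move the diagonal to ones while preserving the requested eigenvalues.
# Wrapped in a helper so nothing runs at import time.
def _demo_random_correlation(seed=514):  # pragma: no cover
    rng = np.random.RandomState(seed)
    eigs = (0.5, 0.8, 1.2, 1.5)  # must sum to the dimension (here 4)
    m = random_correlation.rvs(eigs, random_state=rng)
    assert np.allclose(np.diag(m), 1.0)          # unit diagonal
    assert np.allclose(np.linalg.eigvalsh(m),    # spectrum preserved
                       sorted(eigs))
    return m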
| mit |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/test_algos.py | 3 | 57570 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex,
Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.util.testing as tm
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestSafeSort(object):
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = algos.safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = algos.safe_sort(values)
expected = np.array(list("aaabbc"))
tm.assert_numpy_array_equal(result, expected)
values = []
result = algos.safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
def test_labels(self):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
labels = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_labels = algos.safe_sort(values, labels)
expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# na_sentinel
labels = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_labels = algos.safe_sort(values, labels,
na_sentinel=99)
expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
# out of bound indices
labels = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_labels = algos.safe_sort(values, labels)
expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
labels = []
result, result_labels = algos.safe_sort(values, labels)
expected_labels = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_mixed_integer(self):
values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object)
result = algos.safe_sort(values)
expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(['b', 1, 0, 'a'], dtype=object)
labels = [0, 1, 2, 3, 0, -1, 1]
result, result_labels = algos.safe_sort(values, labels)
expected = np.array([0, 1, 'a', 'b'], dtype=object)
expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_labels, expected_labels)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
if compat.PY2 and not pd._np_version_under1p10:
# RuntimeWarning: tp_compare didn't return -1 or -2 for exception
with tm.assert_produces_warning(RuntimeWarning):
pytest.raises(TypeError, algos.safe_sort, arr)
else:
pytest.raises(TypeError, algos.safe_sort, arr)
def test_exceptions(self):
with tm.assert_raises_regex(TypeError,
"Only list-like objects are allowed"):
algos.safe_sort(values=1)
with tm.assert_raises_regex(TypeError,
"Only list-like objects or None"):
algos.safe_sort(values=[0, 1, 2], labels=1)
with tm.assert_raises_regex(ValueError,
"values should be unique"):
algos.safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = pd.Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = pd.Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = hashtable.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isnull(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isnull(key), expected == na_sentinel)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid for numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = pd.Categorical(list('bac'),
categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = pd.Categorical(list('bac'),
categories=list('abc'),
ordered=True)
# GH 15939
c = pd.Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = pd.Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(pd.Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = pd.CategoricalIndex(pd.Categorical(list('baabc'),
categories=list('bac')))
expected = pd.CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(pd.Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(pd.Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(pd.Categorical(list('aabc'))))
expected = pd.Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
expected_index = pd.IntervalIndex.from_breaks(
breaks).astype('category')
expected = Series([1, 1, 1, 1],
index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = pd.Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(pd.Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1],
index=pd.CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(pd.Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=pd.CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=pd.CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(pd.Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=pd.Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
def test_numeric_object_likes(self):
cases = [np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10,
2**63, 39, 1, 3**5, 7], dtype=np.uint64)]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [pd.Index(case), pd.Index(case, dtype='category')]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category')]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([pd.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([pd.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_last = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_last
for case in cases:
res_first = algos.duplicated(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = algos.duplicated(case, keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = algos.duplicated(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [pd.Index(case), pd.Index(case, dtype='category'),
pd.Index(case, dtype=object)]:
res_first = idx.duplicated(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_last = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(res_last, exp_last)
res_false = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# series
for s in [Series(case), Series(case, dtype='category'),
Series(case, dtype=object)]:
res_first = s.duplicated(keep='first')
tm.assert_series_equal(res_first, Series(exp_first))
res_last = s.duplicated(keep='last')
tm.assert_series_equal(res_last, Series(exp_last))
res_false = s.duplicated(keep=False)
tm.assert_series_equal(res_false, Series(exp_false))
def test_unique_index(self):
cases = [pd.Index([1, 2, 3]), pd.RangeIndex(0, 3)]
for case in cases:
assert case.is_unique
tm.assert_numpy_array_equal(case.duplicated(),
np.array([False, False, False]))
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([values[:, 0]
.reshape(5, 2, order='F')
.std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)]).T.astype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, check_less_precise=6)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = libgroupby.group_var_float32
dtype = np.float32
rtol = 1e-2
class TestHashTable(object):
def test_lookup_nan(self):
xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
m = hashtable.Float64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_lookup_overflow(self):
xs = np.array([1, 2, 2**63], dtype=np.uint64)
m = hashtable.UInt64HashTable()
m.map_locations(xs)
tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs),
dtype=np.int64))
def test_get_unique(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(s.unique(), exp)
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels appends to the vector
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array resizes the vector
uniques.to_array()
htable.get_labels(vals, uniques, 0, -1)
test_cases = [
(hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'),
(hashtable.StringHashTable, hashtable.ObjectVector, 'object'),
(hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'),
(hashtable.Int64HashTable, hashtable.Int64Vector, 'int64'),
(hashtable.UInt64HashTable, hashtable.UInt64Vector, 'uint64')]
for (tbl, vect, dtype) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0)
_test_vector_resize(tbl(), vect(), dtype, 10)
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
a[np.random.choice(len(a), 10)] = -1
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right,
check_dtype=False)
class TestRank(object):
def test_scipy_compat(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
def _check(arr):
mask = ~np.isfinite(arr)
arr = arr.copy()
result = libalgos.rank_1d_float64(arr)
arr[mask] = np.inf
exp = rankdata(arr)
exp[mask] = nan
assert_almost_equal(result, exp)
_check(np.array([nan, nan, 5., 5., 5., nan, 1, 2, 3, nan]))
_check(np.array([4., nan, 5., 5., 5., nan, 1, 2, 4., nan]))
def test_basic(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in np.typecodes['AllInteger']:
s = Series([1, 100], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_uint64_overflow(self):
exp = np.array([1, 2], dtype=np.float64)
for dtype in [np.float64, np.uint64]:
s = Series([1, 2**63], dtype=dtype)
tm.assert_numpy_array_equal(algos.rank(s), exp)
def test_too_many_ndims(self):
arr = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
msg = "Array with ndim > 2 are not supported"
with tm.assert_raises_regex(TypeError, msg):
algos.rank(arr)
def test_pad_backfill_object_segfault():
old = np.array([], dtype='O')
new = np.array([datetime(2010, 12, 31)], dtype='O')
result = libalgos.pad_object(old, new)
expected = np.array([-1], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.pad_object(new, old)
expected = np.array([], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.backfill_object(old, new)
expected = np.array([-1], dtype=np.int64)
assert (np.array_equal(result, expected))
result = libalgos.backfill_object(new, old)
expected = np.array([], dtype=np.int64)
assert (np.array_equal(result, expected))
def test_arrmap():
values = np.array(['foo', 'foo', 'bar', 'bar', 'baz', 'qux'], dtype='O')
result = libalgos.arrmap_object(values, lambda x: x in ['foo', 'bar'])
assert (result.dtype == np.bool_)
class TestTseriesUtil(object):
def test_combineFunc(self):
pass
def test_reindex(self):
pass
def test_isnull(self):
pass
def test_groupby(self):
pass
def test_groupby_withnull(self):
pass
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([0, 0, 1, 1, 1, 1,
2, 2, 2, 2, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([1, 4])
new = Index(lrange(5, 10))
filler = libalgos.backfill_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_pad(self):
old = Index([1, 5, 10])
new = Index(lrange(12))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, 0, 0, 0, 0, 1,
1, 1, 1, 1, 2, 2], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
# corner case
old = Index([5, 10])
new = Index(lrange(5))
filler = libalgos.pad_int64(old.values, new.values)
expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.int64)
tm.assert_numpy_array_equal(filler, expect_filler)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10,
9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7,
6, 5,
4, 3, 2, 1, 0])]
assert (not libalgos.is_lexsorted(failure))
# def test_get_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64)
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype=np.int64)
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64)
# result = lib.get_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsort_indexer():
a = np.random.randint(0, 1000, 100).astype(np.int64)
b = np.random.randint(0, 1000, 100).astype(np.int64)
result = libalgos.groupsort_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='mergesort')
assert (np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = libalgos.groupsort_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert (np.array_equal(result, expected))
def test_infinity_sort():
# GH 13445
# numpy's argsort can be unhappy if something is less than
# itself. Instead, let's give our infinities a self-consistent
# ordering, but outside the float extended real line.
Inf = libalgos.Infinity()
NegInf = libalgos.NegInfinity()
ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
assert all(Inf >= x for x in ref_nums)
assert all(Inf > x or x is Inf for x in ref_nums)
assert Inf >= Inf and Inf == Inf
assert not Inf < Inf and not Inf > Inf
assert all(NegInf <= x for x in ref_nums)
assert all(NegInf < x or x is NegInf for x in ref_nums)
assert NegInf <= NegInf and NegInf == NegInf
assert not NegInf < NegInf and not NegInf > NegInf
for perm in permutations(ref_nums):
assert sorted(perm) == ref_nums
# smoke tests
np.array([libalgos.Infinity()] * 32).argsort()
np.array([libalgos.NegInfinity()] * 32).argsort()
def test_ensure_platform_int():
arr = np.arange(100, dtype=np.intp)
result = libalgos.ensure_platform_int(arr)
assert (result is arr)
def test_int64_add_overflow():
# see gh-14068
msg = "Overflow in int64 addition"
m = np.iinfo(np.int64).max
n = np.iinfo(np.int64).min
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), m)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), n)
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([n, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, n]), np.array([n, n]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([False, True]),
b_mask=np.array([False, True]))
with tm.assert_raises_regex(OverflowError, msg):
with tm.assert_produces_warning(RuntimeWarning):
algos.checked_add_with_arr(np.array([m, m]),
np.array([np.nan, m]))
# Check that the nan boolean arrays override whether or not
# the addition overflows. We don't check the result but just
# the fact that an OverflowError is not raised.
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
b_mask=np.array([True, True]))
with pytest.raises(AssertionError):
with tm.assert_raises_regex(OverflowError, msg):
algos.checked_add_with_arr(np.array([m, m]), np.array([m, m]),
arr_mask=np.array([True, False]),
b_mask=np.array([False, True]))
class TestMode(object):
def test_no_mode(self):
exp = Series([], dtype=np.float64)
tm.assert_series_equal(algos.mode([]), exp)
def test_mode_single(self):
# GH 15714
exp_single = [1]
data_single = [1]
exp_multi = [1]
data_multi = [1, 1]
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1], dtype=np.int)
tm.assert_series_equal(algos.mode([1]), exp)
exp = Series(['a', 'b', 'c'], dtype=np.object)
tm.assert_series_equal(algos.mode(['a', 'b', 'c']), exp)
def test_number_mode(self):
exp_single = [1]
data_single = [1] * 5 + [2] * 3
exp_multi = [1, 3]
data_multi = [1] * 5 + [2] * 3 + [3] * 5
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
s = Series(data_single, dtype=dt)
exp = Series(exp_single, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
s = Series(data_multi, dtype=dt)
exp = Series(exp_multi, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_strobj_mode(self):
exp = ['b']
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
exp = Series(exp, dtype='c')
tm.assert_series_equal(algos.mode(s), exp)
exp = ['bar']
data = ['foo'] * 2 + ['bar'] * 3
for dt in [str, object]:
s = Series(data, dtype=dt)
exp = Series(exp, dtype=dt)
tm.assert_series_equal(algos.mode(s), exp)
def test_datelike_mode(self):
exp = Series(['1900-05-03', '2011-01-03',
'2013-01-02'], dtype="M8[ns]")
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02'], dtype='M8[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_timedelta_mode(self):
exp = Series(['-1 days', '0 days', '1 days'],
dtype='timedelta64[ns]')
s = Series(['1 days', '-1 days', '0 days'],
dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(s), exp)
def test_mixed_dtype(self):
exp = Series(['foo'])
s = Series([1, 'foo', 'foo'])
tm.assert_series_equal(algos.mode(s), exp)
def test_uint64_overflow(self):
exp = Series([2**63], dtype=np.uint64)
s = Series([1, 2**63, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
exp = Series([1, 2**63], dtype=np.uint64)
s = Series([1, 2**63], dtype=np.uint64)
tm.assert_series_equal(algos.mode(s), exp)
def test_categorical(self):
c = Categorical([1, 2])
exp = c
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 'a', 'a'])
exp = Categorical(['a'], categories=[1, 'a'])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
c = Categorical([1, 1, 2, 3, 3])
exp = Categorical([1, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(algos.mode(c), exp)
tm.assert_categorical_equal(c.mode(), exp)
def test_index(self):
idx = Index([1, 2, 3])
exp = Series([1, 2, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 'a', 'a'])
exp = Series(['a'], dtype=object)
tm.assert_series_equal(algos.mode(idx), exp)
idx = Index([1, 1, 2, 3, 3])
exp = Series([1, 3], dtype=np.int64)
tm.assert_series_equal(algos.mode(idx), exp)
exp = Series(['2 min', '1 day'], dtype='timedelta64[ns]')
idx = Index(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min'], dtype='timedelta64[ns]')
tm.assert_series_equal(algos.mode(idx), exp)
| mit |
quantumlib/ReCirq | recirq/otoc/parallel_xeb.py | 1 | 31044 | # Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for performing parallel cross entropy benchmarking."""
from dataclasses import dataclass
from typing import Sequence, List, Set, Tuple, Dict, Union, Optional
import cirq
import numpy as np
import pybobyqa
from cirq.experiments.cross_entropy_benchmarking import _default_interaction_sequence
from matplotlib import pyplot as plt
from recirq.otoc.utils import (
bits_to_probabilities,
angles_to_fsim,
pauli_error_fit,
generic_fsim_gate,
cz_to_sqrt_iswap,
)
_rot_ops = [
cirq.X ** 0.5,
cirq.PhasedXPowGate(phase_exponent=0.25, exponent=0.5),
cirq.Y ** 0.5,
cirq.PhasedXPowGate(phase_exponent=0.75, exponent=0.5),
cirq.X ** -0.5,
cirq.PhasedXPowGate(phase_exponent=-0.75, exponent=0.5),
cirq.Y ** -0.5,
cirq.PhasedXPowGate(phase_exponent=-0.25, exponent=0.5),
]
_rot_mats = [cirq.unitary(r) for r in _rot_ops]
_fsim_angle_labels = [
"theta",
"delta_plus",
"delta_minus_off_diag",
"delta_minus_diag",
"phi",
]
@dataclass
class XEBData:
"""Class for storing the cycle-dependent fidelities and purities of an XEB experiment.
Each field contains two 1D arrays representing the circuit length (i.e. number of cycles) and
the corresponding fidelity or purity values. Fields containing 'fit' in their names are
fitting results to an exponential decay.
"""
fidelity_optimized: Tuple[np.ndarray, np.ndarray]
fidelity_optimized_fit: Tuple[np.ndarray, np.ndarray]
fidelity_unoptimized: Tuple[np.ndarray, np.ndarray]
fidelity_unoptimized_fit: Tuple[np.ndarray, np.ndarray]
purity: Tuple[np.ndarray, np.ndarray]
purity_fit: Tuple[np.ndarray, np.ndarray]
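# Illustrative sketch (not part of the original module): XEBData just pairs
# cycle numbers with measured (or fitted) values for one qubit pair. The
# arrays below are made-up placeholders, not real measurement results.
def _example_xeb_data():  # pragma: no cover
    cycles = np.array([10, 20, 30])
    values = np.array([0.9, 0.8, 0.7])
    return XEBData(
        fidelity_optimized=(cycles, values),
        fidelity_optimized_fit=(cycles, values),
        fidelity_unoptimized=(cycles, values),
        fidelity_unoptimized_fit=(cycles, values),
        purity=(cycles, values),
        purity_fit=(cycles, values),
    )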
@dataclass
class ParallelXEBResults:
"""Class for storing results of a parallel-XEB experiment."""
fitted_gates: Dict[Tuple[Tuple[int, int], Tuple[int, int]], cirq.Circuit]
correction_gates: Dict[Tuple[Tuple[int, int], Tuple[int, int]], cirq.Circuit]
fitted_angles: Dict[Tuple[Tuple[int, int], Tuple[int, int]], Dict[str, float]]
final_errors_optimized: Dict[Tuple[Tuple[int, int], Tuple[int, int]], float]
final_errors_unoptimized: Dict[Tuple[Tuple[int, int], Tuple[int, int]], float]
purity_errors: Dict[Tuple[Tuple[int, int], Tuple[int, int]], float]
raw_data: Dict[Tuple[Tuple[int, int], Tuple[int, int]], XEBData]
def plot_xeb_results(xeb_results: ParallelXEBResults) -> None:
"""Plots the results of a parallel XEB experiment."""
for (q0, q1), xeb_data in xeb_results.raw_data.items():
# Plot the fidelities (both with unoptimized and optimized two-qubit unitaries) and speckle
# purities as functions of XEB cycles, for each qubit pair.
err_0 = xeb_results.final_errors_unoptimized[(q0, q1)]
err_1 = xeb_results.final_errors_optimized[(q0, q1)]
err_p = xeb_results.purity_errors[(q0, q1)]
fig = plt.figure()
plt.plot(
xeb_data.fidelity_unoptimized[0],
xeb_data.fidelity_unoptimized[1],
"ro",
figure=fig,
label=r"{} and {}, unoptimized [$r_p$ = {}]".format(q0, q1, err_0.__round__(5)),
)
plt.plot(
xeb_data.fidelity_optimized[0],
xeb_data.fidelity_optimized[1],
"bo",
figure=fig,
label=r"{} and {}, optimized [$r_p$ = {}]".format(q0, q1, err_1.__round__(5)),
)
plt.plot(
xeb_data.purity[0],
xeb_data.purity[1],
"go",
figure=fig,
label=r"{} and {}, purity error = {}".format(q0, q1, err_p.__round__(5)),
)
plt.plot(xeb_data.fidelity_unoptimized_fit[0], xeb_data.fidelity_unoptimized_fit[1], "r--")
plt.plot(xeb_data.fidelity_optimized_fit[0], xeb_data.fidelity_optimized_fit[1], "b--")
plt.plot(xeb_data.purity_fit[0], xeb_data.purity_fit[1], "g--")
plt.legend()
plt.xlabel("Number of Cycles")
plt.ylabel(r"XEB Fidelity")
num_pairs = len(list(xeb_results.final_errors_optimized.keys()))
pair_pos = np.linspace(0, 1, num_pairs)
# Plot the integrated histogram of Pauli errors for all pairs.
fig_0 = plt.figure()
plt.plot(
sorted(xeb_results.final_errors_unoptimized.values()),
pair_pos,
figure=fig_0,
label="Unoptimized Unitaries",
)
plt.plot(
sorted(xeb_results.final_errors_optimized.values()),
pair_pos,
figure=fig_0,
label="Optimized Unitaries",
)
plt.plot(
sorted(xeb_results.purity_errors.values()),
pair_pos,
figure=fig_0,
label="Purity Errors",
)
plt.xlabel(r"Pauli Error Rate, $r_p$")
plt.ylabel(r"Integrated Histogram")
plt.legend()
# Plot the shifts in the FSIM angles derived from fitting the XEB data.
fig_1 = plt.figure()
for label in _fsim_angle_labels:
shifts = [a[label] for a in xeb_results.fitted_angles.values()]
plt.plot(sorted(shifts), pair_pos, figure=fig_1, label=label)
plt.xlabel(r"FSIM Angle Error (Radian)")
plt.ylabel(r"Integrated Histogram")
plt.legend()
def build_xeb_circuits(
qubits: Sequence[cirq.GridQubit],
cycles: Sequence[int],
benchmark_ops: Sequence[Union[cirq.Moment, Sequence[cirq.Moment]]] = None,
random_seed: int = None,
sq_rand_nums: Optional[np.ndarray] = None,
reverse: bool = False,
z_only: bool = False,
ancilla: Optional[cirq.GridQubit] = None,
cycles_per_echo: Optional[int] = None,
light_cones: Optional[List[List[Set[cirq.GridQubit]]]] = None,
echo_indices: Optional[np.ndarray] = None,
) -> Tuple[List[cirq.Circuit], np.ndarray]:
r"""Builds random circuits for cross entropy benchmarking (XEB).
A list of cirq.Circuit objects of varying lengths is generated, each made of random
single-qubit gates and optional two-qubit gates.
Args:
qubits: The qubits to be involved in XEB.
cycles: The different numbers of cycles the random circuits will have.
benchmark_ops: The operations to be inserted between random single-qubit gates. They can
be one or more cirq.Moment objects, or None (in which case no operation will be
inserted between the random single-qubit gates).
random_seed: The random seed for the single-qubit gates. If unspecified, no random seed
will be used.
sq_rand_nums: The random numbers representing the single-qubit gates. They must be
integers from 0 to 7 if z_only is False, and floats between -1 and 1 if z_only is
True. The dimensions of sq_rand_nums should be len(qubits) by max(cycles). If
unspecified, the gates will be generated at random in situ.
reverse: If True, benchmark_ops will be applied before the random single-qubit gates in
each cycle. Otherwise, they will be applied after the random single-qubit gates.
z_only: Whether the single-qubit gates are to be random \pi/2 rotations around axes on
the equatorial plane of the Bloch sphere (z_only = False), or random rotations around
the z-axis (z_only = True). In the former case, the axes of rotations will be chosen
randomly from 8 evenly spaced axes ($\pi/4$, $\pi/2$ ... $7\pi/4$ radians from the
x-axis). In the latter case, the angles of rotation will be any random value between
$-\pi$ and $\pi$.
ancilla: If specified, an additional qubit will be included in the circuit which does not
interact with the other qubits and only has spin-echo pulses applied to itself.
cycles_per_echo: How often a spin-echo pulse (Y gate) is to be applied to the ancilla
qubit. For example, if the value is 2, a Y gate will be applied every other cycle.
light_cones: A list of length 1 or 2, each specifying a lightcone corresponding to a list
of sets of qubits with the same length as max(cycles). For each cycle, single-qubit
gates outside the first lightcone are either removed or replaced with a spin-echo
pulse. Single-qubit gates outside the second lightcone, if specified, are always
removed.
echo_indices: An array with the same dimension as sq_rand_nums and random integer values
of 1, 2, 3 or 4. They specify the spin-echo pulses applied to qubits outside the
first lightcone, which can be +/-X or +/-Y gates.
Returns:
all_circuits: A list of random circuits, each containing a specified number of cycles.
sq_gate_indices: An NxM array, where N is the number of qubits and M is the maximum
number of cycles. The array elements are the indices for the random single-qubit gates.
"""
if light_cones is not None:
if len(light_cones) > 2:
raise ValueError("light_cones may only have length 1 or 2")
if benchmark_ops is not None:
num_d = len(benchmark_ops)
else:
num_d = 0
max_cycles = max(cycles)
single_rots, sq_gate_indices = _random_rotations(
qubits,
max_cycles,
random_seed,
sq_rand_nums,
light_cones,
echo_indices,
z_rotations_only=z_only,
)
all_circuits = [] # type: List[cirq.Circuit]
for num_cycles in cycles:
circuit_exp = cirq.Circuit()
for i in range(num_cycles):
c = i + 1 if not reverse else num_cycles - i
if ancilla is not None and cycles_per_echo is not None:
if c % cycles_per_echo == 0:
op_list = [cirq.Y(ancilla)]
op_list.extend(single_rots[i])
rots = cirq.Moment(op_list)
else:
rots = cirq.Moment(single_rots[i])
else:
rots = cirq.Moment(single_rots[i])
if reverse:
if benchmark_ops is not None:
circuit_exp.append(benchmark_ops[i % num_d])
circuit_exp.append(rots, strategy=cirq.InsertStrategy.NEW)
else:
circuit_exp.append(rots, strategy=cirq.InsertStrategy.NEW)
if benchmark_ops is not None:
circuit_exp.append(benchmark_ops[i % num_d])
all_circuits.append(circuit_exp)
return all_circuits, sq_gate_indices
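# Example (sketch, not part of the original module): building XEB circuits for a single
# pair of qubits. The sqrt-iSWAP layer below is only an assumed choice; any cirq.Moment
# (or sequence of moments) can serve as benchmark_ops.
#
#     q0, q1 = cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)
#     layer = cirq.Moment([cirq.ISWAP(q0, q1) ** 0.5])
#     circuits, sq_indices = build_xeb_circuits(
#         qubits=[q0, q1], cycles=[2, 4, 8], benchmark_ops=[layer], random_seed=0
#     )
#     # one circuit per entry in cycles; sq_indices has shape (len(qubits), max(cycles))
#     assert len(circuits) == 3 and sq_indices.shape == (2, 8)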
def parallel_xeb_fidelities(
all_qubits: List[Tuple[int, int]],
num_cycle_range: Sequence[int],
measured_bits: List[List[List[np.ndarray]]],
scrambling_gates: List[List[np.ndarray]],
fsim_angles: Dict[str, float],
interaction_sequence: Optional[
List[Set[Tuple[Tuple[int, int], Tuple[int, int]]]]
] = None,
gate_to_fit: str = "iswap",
num_restarts: int = 3,
num_points: int = 8,
print_fitting_progress: bool = True,
) -> ParallelXEBResults:
"""Computes and optimizes cycle fidelities from parallel XEB data.
Args:
all_qubits: List of qubits involved in a parallel XEB experiment, specified using their
(row, col) locations.
num_cycle_range: The different numbers of cycles in the random circuits.
measured_bits: The experimental bit-strings stored in a nested list. The first dimension
of the nested list represents different configurations (e.g. how the two-qubit gates
are applied) used in parallel XEB. The second dimension represents different trials
(i.e. random circuit instances) used in XEB. The third dimension represents the
different numbers of cycles and must be the same as len(num_cycle_range). Each
np.ndarray has dimension M x N, where M is the number of repetitions (stats) for each
circuit and N is the number of qubits involved.
scrambling_gates: The random circuit indices specified as integers between 0 and 7. See
the documentation of build_xeb_circuits for details. The first dimension of the
nested list represents the different configurations and must be the same as the first
dimension of measured_bits. The second dimension represents the different trials and
must be the same as the second dimension of measured_bits.
fsim_angles: An initial guess for the five FSIM angles for each qubit pair.
interaction_sequence: The pairs of qubits with FSIM applied for each configuration. Its
length must be the same as len(measured_bits).
gate_to_fit: Can be either 'iswap', 'sqrt-iswap', 'cz' or any other string. Determines
the FSIM angles that will be changed from their initial guess values to optimize the
XEB fidelity of each qubit pair. For 'iswap', only 'delta_plus' and
'delta_minus_off_diag' are changed. For 'sqrt-iswap', 'delta_plus',
'delta_minus_off_diag' and 'delta_minus_diag' are changed. For 'cz',
only 'delta_plus' and 'delta_minus_diag' are changed. For any other string, all five
angles are changed.
num_restarts: Number of restarts with different random initial guesses.
num_points: The total number of XEB fidelities to be used in the cost function for
optimization. Default is 8, such that the cost function is the sum of the XEB
fidelities for the first 8 numbers of cycles in num_cycle_range.
print_fitting_progress: Whether to print progress during the fitting process.
Returns:
A ParallelXEBResults object that contains the following fields:
fitted_gates: A dictionary with qubit pairs as keys and optimized FSIM unitaries,
represented by cirq.Circuit objects, as values.
correction_gates: Same as fitted_gates, but with all Z rotations reversed in signs.
fitted_angles: A dictionary with qubit pairs as keys and optimized FSIM unitaries as
values. Here the FSIM unitaries are represented as a dictionaries with the names of
the FSIM phases as keys and their fitted values as values.
final_errors_optimized: A dictionary with qubit pairs as keys and their cycle errors
after fitting as values.
final_errors_unoptimized: A dictionary with qubit pairs as keys and their cycle errors
before fitting as values.
purity_errors: A dictionary with qubit pairs as keys and their speckle purity errors per
cycle as values.
raw_data: A dictionary with qubit pairs as keys and XEBData as values. Each XEBData
contains the cycle-dependent XEB fidelities and purities, as well as their fits.
"""
num_trials = len(measured_bits[0])
p_data_all, sq_gates = _pairwise_xeb_probabilities(
all_qubits,
num_cycle_range,
measured_bits,
scrambling_gates,
interaction_sequence,
)
final_errors_unoptimized = {}
final_errors_optimized = {}
delta_angles = {}
purity_errors = {}
fitted_gates = {}
fitted_angles = {}
correction_gates = {}
raw_data = {}
for (q0, q1), p_data in p_data_all.items():
if print_fitting_progress:
print("Fitting qubits {} and {}".format(q0, q1))
def xeb_fidelity(
angle_shifts: np.ndarray, num_p: int
) -> Tuple[float, List[float], np.ndarray, np.ndarray, float]:
new_angles = fsim_angles.copy()
for i, angle_name in enumerate(_fsim_angle_labels):
new_angles[angle_name] += angle_shifts[i]
fsim_mat = angles_to_fsim(**new_angles)
max_cycles = num_cycle_range[num_p - 1]
p_sim = [np.zeros((num_trials, 4)) for _ in range(max_cycles)]
for i in range(num_trials):
unitary = np.identity(4, dtype=complex)
for j in range(max_cycles):
mat_0 = _rot_mats[sq_gates[(q0, q1)][i][0, j]]
mat_1 = _rot_mats[sq_gates[(q0, q1)][i][1, j]]
unitary = np.kron(mat_0, mat_1).dot(unitary)
unitary = fsim_mat.dot(unitary)
if j + 1 in num_cycle_range:
idx = num_cycle_range.index(j + 1)
p_sim[idx][i, :] = np.abs(unitary[:, 0]) ** 2
fidelities = [_alpha_least_square(p_sim[i], p_data[i]) for i in range(num_p)]
cost = -np.sum(fidelities)
err, x_vals, y_vals = pauli_error_fit(
np.asarray(num_cycle_range)[0:num_p],
np.asarray(fidelities),
add_offset=False,
)
return err, fidelities, x_vals, y_vals, cost
def cost_function(angle_shifts: np.ndarray) -> float:
# Accepts shifts in a variable number of FSIM angles and returns the cost (i.e. the
# negative sum of XEB fidelities). If the sqrt-iSWAP is the gate, shifts in delta_plus,
# delta_minus_off_diag and delta_minus_diag are specified. If iSWAP is the gate,
# shifts in delta_plus and delta_minus_off_diag are specified. If CZ is the gate,
# shifts in delta_plus and delta_minus_diag are specified. In other cases, shifts in
# all 5 angles are specified. The unspecified angles are set to have zero shifts from
# their initial values.
if gate_to_fit == "sqrt-iswap":
full_shifts = np.zeros(5, dtype=float)
full_shifts[1:4] = angle_shifts
elif gate_to_fit == "iswap":
full_shifts = np.zeros(5, dtype=float)
full_shifts[1:3] = angle_shifts
elif gate_to_fit == "cz" or gate_to_fit == "composite-cz":
full_shifts = np.zeros(5, dtype=float)
full_shifts[1] = angle_shifts[0]
full_shifts[3] = angle_shifts[1]
else:
full_shifts = angle_shifts
_, _, _, _, cost = xeb_fidelity(full_shifts, num_p=num_points)
return cost
sp_purities = [_speckle_purity(p_data[i]) ** 0.5 for i in range(len(num_cycle_range))]
err_p, x_vals_p, y_vals_p = pauli_error_fit(
np.asarray(num_cycle_range), np.asarray(sp_purities), add_offset=True
)
purity_errors[(q0, q1)] = err_p
err_0, f_vals_0, x_fitted_0, y_fitted_0, _ = xeb_fidelity(
np.zeros(5), num_p=len(num_cycle_range)
)
final_errors_unoptimized[(q0, q1)] = err_0
# Set up initial guesses on the relevant FSIM angles according to the ideal gate. See
# comments in cost_function. All angles are allowed to shift up to +/- 1 rad from their
# ideal (initial guess) values.
err_min = 1.0
soln_vec = np.zeros(5)
if gate_to_fit == "sqrt-iswap":
init_guess = np.zeros(3)
bounds = (np.ones(3) * -1.0, np.ones(3) * 1.0)
elif gate_to_fit == "iswap" or gate_to_fit == "cz" or gate_to_fit == "composite-cz":
init_guess = np.zeros(2)
bounds = (np.ones(2) * -1.0, np.ones(2) * 1.0)
else:
init_guess = np.array([0.0, 0.0, 0.0, 0.0, 0.1])
bounds = (np.ones(5) * -1.0, np.ones(5) * 1.0)
for _ in range(num_restarts):
res = pybobyqa.solve(
cost_function, init_guess, maxfun=3000, bounds=bounds, rhoend=1e-11
)
# Randomize the initial values for the relevant FSIM angles.
if gate_to_fit == "sqrt-iswap":
init_guess = np.random.uniform(-0.3, 0.3, 3)
elif gate_to_fit == "iswap" or gate_to_fit == "cz" or gate_to_fit == "composite-cz":
init_guess = np.random.uniform(-0.3, 0.3, 2)
else:
init_guess = np.random.uniform(-0.2, 0.2, 5)
init_guess[0] = 0.0
init_guess[4] = 0.1
if res.f < err_min:
err_min = res.f
if gate_to_fit == "sqrt-iswap":
soln_vec = np.zeros(5)
soln_vec[1:4] = np.asarray(res.x)
elif gate_to_fit == "iswap":
soln_vec = np.zeros(5)
soln_vec[1:3] = np.asarray(res.x)
elif gate_to_fit == "cz" or gate_to_fit == "composite-cz":
soln_vec = np.zeros(5)
soln_vec[1] = np.asarray(res.x)[0]
soln_vec[3] = np.asarray(res.x)[1]
else:
soln_vec = np.asarray(res.x)
err_1, f_vals_1, x_fitted_1, y_fitted_1, _ = xeb_fidelity(
soln_vec, num_p=len(num_cycle_range)
)
final_errors_optimized[(q0, q1)] = err_1
delta_angles[(q0, q1)] = {a: soln_vec[i] for i, a in enumerate(_fsim_angle_labels)}
new_angles = fsim_angles.copy()
for k, v in new_angles.items():
new_angles[k] += delta_angles[(q0, q1)][k]
fitted_angles[(q0, q1)] = new_angles
q_0 = cirq.GridQubit(*q0)
q_1 = cirq.GridQubit(*q1)
gate_list = generic_fsim_gate(new_angles, (q_0, q_1))
circuit_fitted = cirq.Circuit(gate_list)
fitted_gates[(q0, q1)] = circuit_fitted
# Use the fitted FSIM to set up the virtual-Z gates that are needed to cancel out the
# shifts in the SQ phases (i.e. delta angles).
corrected_angles = new_angles.copy()
corrected_angles["delta_plus"] *= -1.0
corrected_angles["delta_minus_off_diag"] *= -1.0
corrected_angles["delta_minus_diag"] *= -1.0
corrected_angles["theta"] = fsim_angles["theta"]
corrected_angles["phi"] = fsim_angles["phi"]
gate_list_corrected = generic_fsim_gate(corrected_angles, (q_0, q_1))
if gate_to_fit == "composite-cz":
circuit_corrected = cirq.Circuit(gate_list_corrected[0:2])
circuit_corrected.append(cz_to_sqrt_iswap(q_0, q_1))
circuit_corrected.append(cirq.Moment(gate_list_corrected[-2:]))
else:
circuit_corrected = cirq.Circuit(gate_list_corrected)
correction_gates[(q0, q1)] = circuit_corrected
raw_data[(q0, q1)] = XEBData(
fidelity_optimized=(np.asarray(num_cycle_range), np.asarray(f_vals_1)),
fidelity_optimized_fit=(x_fitted_1, y_fitted_1),
fidelity_unoptimized=(np.asarray(num_cycle_range), np.asarray(f_vals_0)),
fidelity_unoptimized_fit=(x_fitted_0, y_fitted_0),
purity=(np.asarray(num_cycle_range), np.asarray(sp_purities)),
purity_fit=(x_vals_p, y_vals_p),
)
return ParallelXEBResults(
fitted_gates=fitted_gates,
correction_gates=correction_gates,
fitted_angles=fitted_angles,
final_errors_optimized=final_errors_optimized,
final_errors_unoptimized=final_errors_unoptimized,
purity_errors=purity_errors,
raw_data=raw_data,
)
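# Example (sketch): end-to-end use of the two public entry points above. The variables
# measured_bits, scrambling_gates and init_angles are placeholders for data collected by
# running the output of build_xeb_circuits on hardware or a simulator; the keys of
# init_angles must match _fsim_angle_labels.
#
#     results = parallel_xeb_fidelities(
#         all_qubits=[(0, 0), (0, 1)],
#         num_cycle_range=[2, 4, 8],
#         measured_bits=measured_bits,
#         scrambling_gates=scrambling_gates,
#         fsim_angles=init_angles,
#         gate_to_fit="sqrt-iswap",
#     )
#     plot_xeb_results(results)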
def _random_rotations(
qubits: Sequence[cirq.GridQubit],
num_layers: int,
rand_seed: Optional[int] = None,
rand_nums: Optional[np.ndarray] = None,
light_cones: Optional[List[List[Set[cirq.GridQubit]]]] = None,
echo_indices: Optional[np.ndarray] = None,
z_rotations_only: bool = False,
) -> Tuple[List[List[cirq.OP_TREE]], np.ndarray]:
"""Generate random single-qubit rotations and group them into different circuit layers."""
num_qubits = len(qubits)
random_state = cirq.value.parse_random_state(rand_seed)
if rand_nums is None:
if z_rotations_only:
rand_nums = random_state.uniform(-1, 1, (num_qubits, num_layers))
else:
rand_nums = random_state.choice(8, (num_qubits, num_layers))
single_q_layers = [] # type: List[List[cirq.OP_TREE]]
for i in range(num_layers):
op_seq = []
for j in range(num_qubits):
gate_choice = 0
if light_cones is not None:
if len(light_cones) == 1:
if qubits[j] not in light_cones[0][i]:
gate_choice = 1
elif len(light_cones) == 2:
if qubits[j] not in light_cones[1][i]:
gate_choice = 2
elif qubits[j] not in light_cones[0][i]:
gate_choice = 1
if gate_choice == 0:
if z_rotations_only:
op_seq.append(cirq.Z(qubits[j]) ** rand_nums[j, i])
else:
op_seq.append(_rot_ops[rand_nums[j, i]](qubits[j]))
elif gate_choice == 1:
if echo_indices is None:
op_seq.append(cirq.Y(qubits[j]))
else:
if echo_indices[j, i] > 0:
op_seq.append(_spin_echo_gates(echo_indices[j, i])(qubits[j]))
else:
continue
single_q_layers.append(op_seq)
return single_q_layers, rand_nums
def _alpha_least_square(probs_exp: np.ndarray, probs_data: np.ndarray) -> float:
"""Compare an ideal and an experimental probability distribution and compute their
cross-entropy fidelity.
"""
if probs_exp.shape != probs_data.shape:
raise ValueError("probs_exp and probs_data must have the same shape")
num_trials, num_states = probs_exp.shape
p_exp = np.maximum(probs_exp, 1e-22)
p_data = np.maximum(probs_data, 0.0)
numerator = 0.0
denominator = 0.0
p_uni = 1.0 / num_states
for i in range(num_trials):
s_incoherent = -np.sum(p_uni * np.log(p_exp[i, :]))
s_expected = -np.sum(p_exp[i, :] * np.log(p_exp[i, :]))
s_meas = -np.sum(p_data[i, :] * np.log(p_exp[i, :]))
delta_h_meas = float(s_incoherent - s_meas)
delta_h = float(s_incoherent - s_expected)
numerator += delta_h_meas * delta_h
denominator += delta_h ** 2
return numerator / denominator
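# Sanity check (sketch): when the measured distribution equals the ideal one, s_meas equals
# s_expected for every trial, so the least-squares fidelity estimate is exactly 1:
#
#     probs = np.random.RandomState(0).dirichlet(np.ones(4), size=10)
#     assert abs(_alpha_least_square(probs, probs) - 1.0) < 1e-12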
def _speckle_purity(probs_data: np.ndarray) -> float:
"""Compute the speckle purity of a probability distribution"""
d = 4
return np.var(probs_data) * d ** 2 * (d + 1) / (d - 1)
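# Illustration (sketch): probabilities of a Haar-random two-qubit state follow the
# Porter-Thomas distribution with Var(p) = (D - 1) / (D^2 * (D + 1)), so the formula above
# evaluates to ~1; a uniform (fully decohered) distribution has Var(p) = 0 and gives 0:
#
#     d = 4
#     var_pt = (d - 1) / (d ** 2 * (d + 1))
#     assert abs(var_pt * d ** 2 * (d + 1) / (d - 1) - 1.0) < 1e-12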
def _pairwise_xeb_probabilities(
all_qubits: List[Tuple[int, int]],
num_cycle_range: Sequence[int],
measured_bits: List[List[List[np.ndarray]]],
scrambling_gates: List[List[np.ndarray]],
interaction_sequence: Optional[
List[Set[Tuple[Tuple[int, int], Tuple[int, int]]]]
] = None,
) -> Tuple[
Dict[Tuple[Tuple[int, int], Tuple[int, int]], List[np.ndarray]],
Dict[Tuple[Tuple[int, int], Tuple[int, int]], List[np.ndarray]],
]:
"""Computes the probability distributions of each qubit pair in a parallel XEB experiment.
Args:
all_qubits: List of qubits involved in parallel XEB, specified as (row, col) tuples.
num_cycle_range: Different numbers of circuit cycles used in parallel XEB.
measured_bits: The experimental bit-strings stored in a nested list. The first dimension
of the nested list represents different configurations (e.g. how the two-qubit gates
are applied) used in parallel XEB. The second dimension represents different trials
(i.e. random circuit instances) used in XEB. The third dimension represents the
different numbers of cycles and must be the same as len(num_cycle_range). Each
np.ndarray has dimension M x N, where M is the number of repetitions (stats) for each
circuit and N is the number of qubits involved.
scrambling_gates: The random circuit indices specified as integers between 0 and 7. See
the documentation of build_xeb_circuits for details. The first dimension of the
nested list represents the different configurations and must be the same as the first
dimension of measured_bits. The second dimension represents the different trials and
must be the same as the second dimension of measured_bits.
interaction_sequence: The pairs of qubits with FSIM applied for each configuration. Its
length must be the same as len(measured_bits).
Returns:
p_data_all: Keys are qubit pairs. Each value is a list (with length =
len(num_cycle_range)) of np.array. The rows of the array are of length 4 and
represent the measured probabilities of two-qubit basis states. Each row represents
the result from a different trial (circuit instance).
sq_gates: Keys are qubit pairs. Each value is a list (with length = number of circuit
instances) of np.array. Each array contains the indices (integers between 0 and 7)
for the random SQ gates relevant to the given qubit pair.
"""
num_trials = len(measured_bits[0])
qubits = [cirq.GridQubit(*idx) for idx in all_qubits]
if interaction_sequence is None:
int_layers = _default_interaction_sequence(qubits)
else:
int_layers = [
{(cirq.GridQubit(i, j), cirq.GridQubit(k, l)) for ((i, j), (k, l)) in layer}
for layer in interaction_sequence
]
p_data_all = {}
sq_gates = {}
for l, qubit_set in enumerate(int_layers):
qubit_pairs = [
((q_s[0].row, q_s[0].col), (q_s[1].row, q_s[1].col)) for q_s in qubit_set
]
p_data = {
q_s: [np.zeros((num_trials, 4)) for _ in range(len(num_cycle_range))]
for q_s in qubit_pairs
}
for (q0, q1) in qubit_pairs:
idx_0, idx_1 = all_qubits.index(q0), all_qubits.index(q1)
sq_gates[(q0, q1)] = [
scrambling_gates[l][k][[idx_0, idx_1], :] for k in range(num_trials)
]
for i in range(num_trials):
for j in range(len(num_cycle_range)):
bits = measured_bits[l][i][j]
for q_s in qubit_pairs:
p_data[q_s][j][i, :] = bits_to_probabilities(all_qubits, q_s, bits)
p_data_all = {**p_data_all, **p_data}
return p_data_all, sq_gates
def _spin_echo_gates(idx: int) -> cirq.Gate:
"""Outputs one of 4 single-qubit pi rotations used for spin echoes."""
pi_pulses = [
cirq.PhasedXPowGate(phase_exponent=0.0, exponent=1.0),
cirq.PhasedXPowGate(phase_exponent=0.5, exponent=1.0),
cirq.PhasedXPowGate(phase_exponent=1.0, exponent=1.0),
cirq.PhasedXPowGate(phase_exponent=-0.5, exponent=1.0),
]
return pi_pulses[idx - 1]
| apache-2.0 |
tarthy6/dozer-thesis | py/plot.py | 3 | 43753 | # encoding: utf-8
# 2008 © Václav Šmilauer <eudoxos@arcig.cz>
"""
Module containing utility functions for plotting inside woo. Most functionality is exposed through :obj:`woo.core.Plot`, however.
"""
## all exported names
__all__=['live','liveInterval','autozoom','legendAlpha','scientific','scatterMarkerKw']
import sys
PY3K=sys.version_info[0]==3
pilOk=False
try:
import PIL as Image
pilOk=True
except ImportError: pass
try:
import Image
pilOk=True
except ImportError: pass
if not pilOk: print 'WARN: PIL/Image module (python-imaging) not importable, embedding images into plots will give errors.'
# PY3K
if PY3K:
def _bytes(s): return bytes(s,'ascii')
else:
def _bytes(s): return s
import matplotlib,os,time,math,itertools,sys
# running in batch
#
# If GtkAgg is the default, X must be working, which is not the case
# with batches (DISPLAY is unset in such case) and importing pylab fails then.
#
# Agg does not require the GUI part and works without any DISPLAY active
# just fine.
#
# see http://www.mail-archive.com/woo-dev@lists.launchpad.net/msg04320.html
# and https://lists.launchpad.net/woo-users/msg03289.html
#
# IMPORTANT: this sets woo.runtime.hasDisplay
try: import woo.qt
except ImportError: pass
import woo.runtime, wooMain, woo.config
if wooMain.options.fakeDisplay: woo.runtime.hasDisplay=False
if 'qt4' not in woo.config.features: woo.runtime.hasDisplay=False
if woo.runtime.hasDisplay==None: # not yet set
raise RuntimeError('woo.plot imported before woo.runtime.hasDisplay is set. This should not really happen, please report.')
if not woo.runtime.hasDisplay:
#from matplotlib.backends.backend_agg import FigureCanvasAgg as WooFigureCanvas
matplotlib.use('Agg') ## pylab API
else:
matplotlib.use('Qt4Agg') # pylab API
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as WooFigureCanvas
from matplotlib.backends.backend_agg import FigureCanvasAgg as _HeadlessFigureCanvas
from minieigen import *
matplotlib.rc('axes',grid=True) # put grid in all figures
import pylab
# simulation-specific bits moved to woo.core.Plot
# so that they are saved and reloaded with Scene automatically
#
# those remain module-global objects
#
live=True if woo.runtime.hasDisplay else False
"Enable/disable live plot updating. Disabled without display (useless)."
liveInterval=.5
"Interval for the live plot updating, in seconds."
autozoom=True
"Enable/disable automatic plot rezooming after data update."
legendAlpha=.6
'Transparency of legend frames in plots'
scientific=True if hasattr(matplotlib.axes.Axes,'ticklabel_format') else False ## safe default for older matplotlib versions
"Use scientific notation for axes ticks."
current=-1
"Point that is being tracked with a scatter point. -1 is for the last point, set to *nan* to disable."
afterCurrentAlpha=.2
"Color alpha value for part of lines after :obj:`woo.plot.current`, between 0 (invisible) to 1 (full color)"
scatterMarkerKw=dict(verts=[(0.,0.),(-30.,10.),(-25,0),(-30.,-10.)],marker=None)
"Parameters for the current position marker"
annotateKw=dict(horizontalalignment='left',verticalalignment='upper right',fontsize=9)
"Parameters for annotation (current value) display"
lineKw=dict(linewidth=1.5,alpha=.8)
"Parameters for the normal line plot"
componentSeparator='_'
componentSuffixes={Vector2:{-1:'norm',0:'x',1:'y'},Vector3:{-1:'norm',0:'x',1:'y',2:'z'},Vector2i:{0:'x',1:'y'},Vector3i:{0:'x',1:'y',2:'z'},Vector6:{-1:'norm',0:'xx',1:'yy',2:'zz',3:'yz',4:'zx',5:'xy'},Matrix3:{(0,0):'xx',(1,1):'yy',(2,2):'zz',(0,1):'xy',(1,0):'yx',(0,2):'xz',(2,0):'zx',(1,2):'yz',(2,1):'zy'}}
# if a type with entry in componentSuffixes is given in addData, columns for individual components are synthesized using indices and suffixes given for each type; negative index means the norm, which is computed using the 'norm()' method (must be defined by the type)
# e.g. foo=Vector3r(1,2,3) will result in columns foo_x=1,foo_y=2,foo_z=3,foo_norm=3.741657...
def Scene_plot_reset(P):
"Reset all plot-related variables (data, plots, labels)"
P.data,P.plots,P.imgData={},{},{}
pylab.close('all')
def Scene_plot_resetData(P):
"Reset all plot data; keep plots and labels intact."
P.data={}
def Scene_plot_splitData(P):
"Make all plots discontinuous at this point (adds nan's to all data fields)"
P.addData({})
def Scene_plot_reverseData(P):
"""Reverse woo.core.Plot.data order.
Useful for tension-compression test, where the initial (zero) state is loaded and, to make data continuous, the last part must *end* in the zero state.
"""
for k in P.data: P.data[k].reverse()
def addDataColumns(data,dd):
'''Add new columns with NaN data, without adding anything to other columns. Does nothing for columns that already exist'''
numSamples=len(data[data.keys()[0]]) if len(data)>0 else 0
for d in dd:
if d in data.keys(): continue
data[d]=[nan for i in range(numSamples)]
def Scene_plot_autoData(P,**kw):
"""Add data by evaluating contents of :obj:`woo.core.Plot.plots`. Expressions rasing exceptions will be handled gracefully, but warning is printed for each.
>>> from woo import plot; from woo.dem import *; from woo.core import *
>>> from pprint import pprint
>>> S=Scene(fields=[DemField(gravity=(0,0,-10))])
>>> S.plot.plots={'S.step':('S.time',None,'numParticles=len(S.dem.par)')}
>>> S.plot.autoData()
>>> pprint(S.plot.data)
{'S.step': [0], 'S.time': [0.0], 'numParticles': [0]}
Note that each item in :obj:`woo.core.Plot.plots` can be
* an expression to be evaluated (using the ``eval`` builtin);
* ``name=expression`` string, where ``name`` will appear as label in plots, and expression will be evaluated each time;
* a dictionary-like object -- current keys are labels of plots and current values are added to :obj:`woo.core.Plot.data`. The contents of the dictionary can change over time, in which case new lines will be created as necessary.
A simple simulation with plot can be written in the following way; note how the energy plot is specified.
>>> from woo import plot, utils
>>> S=Scene(fields=[DemField(gravity=(0,0,-10))])
>>> S.plot.plots={'i=S.step':('**S.energy','total energy=S.energy.total()',None,'rel. error=S.energy.relErr()')}
>>> # we create a simple simulation with one ball falling down
>>> S.dem.par.add(Sphere.make((0,0,0),1,mat=utils.defaultMaterial()))
0
>>> S.engines=[Leapfrog(damping=.4,reset=True),
... # get data required by plots at every step
... PyRunner(1,'S.plot.autoData()')
... ]
>>> S.trackEnergy=True
>>> S.run(3,True)
>>> pprint(S.plot.data) #doctest: +ELLIPSIS
{'grav': [0.0, 0.0, -20.357...],
'i': [0, 1, 2],
'kinetic': [0.0, 1.526..., 13.741...],
'nonviscDamp': [nan, nan, 8.143...],
'rel. error': [0.0, 1.0, 0.0361...],
'total energy': [0.0, 1.526..., 1.526...]}
.. plot::
import woo, woo.plot, woo.utils
from woo.dem import *
from woo.core import *
S=Scene(fields=[DemField(gravity=(0,0,-10))])
S.dem.par.add(Sphere.make((0,0,0),1));
S.engines=[Leapfrog(damping=.4,reset=True),PyRunner('S.plot.autoData()')]
S.plot.plots={'i=S.step':('**S.energy','total energy=S.energy.total()',None,'rel. error=S.energy.relErr()')}
S.trackEnergy=True
S.run(500,True)
S.plot.legendLoc=('lower left','upper right')
S.plot.plot()
"""
def colDictUpdate(col,dic,kw):
'update *dic* with the value from col, which is a "expr" or "name=expr" string; all exceptions from ``eval`` are caught and warning is printed without adding any data.'
name,expr=col.split('=',1) if '=' in col else (col,col)
try:
val=eval(expr,kw)
dic.update({name:val})
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating auto-column `'+expr+"'%s."%('' if name==expr else ' ('+name+')')
cols={}
S=P.scene
# data,imgData,plots=P.data,P.imgData,P.plots
kw.update(S=S)
kw.update(woo=woo)
for p in P.plots:
pp=P.plots[p]
colDictUpdate(p.strip(),cols,kw)
for y in tuplifyYAxis(P.plots[p]):
# imgplot specifier
if y==None: continue
yy=addPointTypeSpecifier(y,noSplit=True)[0]
yy1=yy.split('=')[-1]
# dict-like object
# if hasattr(yy,'keys'): cols.update(dict(yy))
# callable returning list sequence of expressions to evaluate
if yy1.startswith('**'):
try:
dd=eval(yy1[2:],{'S':S})
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating dictionary-returning expression "'+yy1[2:]+'".'
for k,v in dd.items(): cols[k]=v
elif yy1.startswith('*'):
ee=eval(yy1[1:],{'S':S})
for e in ee: colDictUpdate(e,cols,{'S':S})
else: colDictUpdate(yy,cols,kw)
P.addData(cols)
def Scene_plot_addData(P,*d_in,**kw):
"""Add data from arguments name1=value1,name2=value2 to woo.plot.data.
(the old {'name1':value1,'name2':value2} is deprecated, but still supported)
New data will be padded with nan's, unspecified data will be nan (nan's don't appear in graphs).
This way, equal length of all data is assured so that they can be plotted one against any other.
>>> S=woo.master.scene
>>> from pprint import pprint
>>> S.plot.resetData()
>>> S.plot.addData(a=1)
>>> S.plot.addData(b=2)
>>> S.plot.addData(a=3,b=4)
>>> pprint(S.plot.data)
{'a': [1, nan, 3], 'b': [nan, 2, 4]}
Some sequence types can be given to addData; they will be saved in synthesized columns for individual components.
>>> S.plot.resetData()
>>> S.plot.addData(c=Vector3(5,6,7),d=Matrix3(8,9,10, 11,12,13, 14,15,16))
>>> pprint(S.plot.data) #doctest: +ELLIPSIS
{'c_norm': [10.488...],
'c_x': [5.0],
'c_y': [6.0],
'c_z': [7.0],
'd_xx': [8.0],
'd_xy': [9.0],
'd_xz': [10.0],
'd_yx': [11.0],
'd_yy': [12.0],
'd_yz': [13.0],
'd_zx': [14.0],
'd_zy': [15.0],
'd_zz': [16.0]}
"""
data,imgData=P.data,P.imgData
import numpy
if len(data)>0: numSamples=len(data[data.keys()[0]])
else: numSamples=0
# align with imgData, if there is more of them than data
if len(imgData)>0 and numSamples==0: numSamples=max(numSamples,len(imgData[imgData.keys()[0]]))
d=(d_in[0] if len(d_in)>0 else {})
d.update(**kw)
# handle types composed of multiple values (vectors, matrices)
dNames=d.keys()[:] # make copy, since dict cannot change size if iterated over directly
for name in dNames:
if type(d[name]) in componentSuffixes:
val=d[name]
suffixes=componentSuffixes[type(d[name])]
for ix in suffixes:
d[name+componentSeparator+suffixes[ix]]=(d[name][ix] if ix>=0 else d[name].norm())
del d[name]
elif hasattr(d[name],'__len__'):
raise ValueError('plot.addData given unhandled sequence type (is a '+type(d[name]).__name__+', must be number or '+'/'.join([k.__name__ for k in componentSuffixes])+')')
for name in d:
if not name in data.keys(): data[name]=[]
for name in data:
data[name]+=(numSamples-len(data[name]))*[nan]
data[name].append(d[name] if name in d else nan)
#print [(k,len(data[k])) for k in data.keys()]
#numpy.array([nan for i in range(numSamples)])
#numpy.append(data[name],[d[name]],1)
def Scene_plot_addImgData(P,**kw):
data,imgData=P.data,P.imgData
for k in kw:
if k not in imgData: imgData[k]=[]
# align imgData with data
if len(data.keys())>0 and len(imgData.keys())>0:
nData,nImgData=len(data[data.keys()[0]]),len(imgData[imgData.keys()[0]])
#if nImgData>nData-1: raise RuntimeError("imgData is already the same length as data?")
if nImgData<nData-1: # repeat last value
for k in imgData.keys():
lastValue=imgData[k][-1] if len(imgData[k])>0 else None
imgData[k]+=(nData-len(imgData[k])-1)*[lastValue]
elif nData<nImgData:
for k in data.keys():
lastValue=data[k][-1] if len(data[k])>0 else nan
data[k]+=(nImgData-nData)*[lastValue] # add one more, because we will append to imgData below
# add values from kw
newLen=(len(imgData[imgData.keys()[0]]) if imgData else 0)+1 # current length plus 1
for k in kw:
if k in imgData and len(imgData[k])>0: imgData[k]+=(newLen-len(imgData[k])-1)*[imgData[k][-1]]+[kw[k]] # repeat last element as necessary
else: imgData[k]=(newLen-1)*[None]+[kw[k]] # repeat None if no previous value
# align values which were not in kw by repeating the last value
for k in imgData:
if len(imgData[k])<newLen: imgData[k]+=(newLen-len(imgData[k]))*[imgData[k][-1]]
assert len(set([len(i) for i in imgData.values()]))<=1 # no data or all having the same value
# not public functions
def addPointTypeSpecifier(o,noSplit=False):
"""Add point type specifier to simple variable name; optionally take only the part before '=' from the first item."""
if type(o) in [tuple,list]:
if noSplit or not type(o[0])==str: return o
else: return (o[0].split('=',1)[0],)+tuple(o[1:])
else: return (o if (noSplit or not type(o)==str) else (o.split('=',1)[0]),'')
def tuplifyYAxis(pp):
"""convert one variable to a 1-tuple"""
if type(pp) in [tuple,list]: return pp
else: return (pp,)
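# Examples of the two helpers above (sketch):
#   addPointTypeSpecifier('t') -> ('t','')
#   addPointTypeSpecifier(('t','r--')) -> ('t','r--')
#   addPointTypeSpecifier('v=S.step') -> ('v','')  # only the name before '=' is kept
#   tuplifyYAxis('t') -> ('t',); tuplifyYAxis(('t','u')) -> ('t','u')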
def xlateLabel(l,labels):
"Return translated label; return l itself if not in the labels dict."
if l in labels.keys(): return labels[l]
else: return l
class LineRef:
"""Holds reference to plot line and to original data arrays (which change during the simulation),
and updates the actual line using those data upon request."""
def __init__(self,line,scatter,annotation,line2,xdata,ydata,imgData=None,dataName=None):
self.line,self.scatter,self.annotation,self.line2,self.xdata,self.ydata,self.imgData,self.dataName=line,scatter,annotation,line2,xdata,ydata,imgData,dataName
def update(self):
if isinstance(self.line,matplotlib.image.AxesImage):
# image name
try:
if len(self.xdata)==0 and self.dataName: self.xdata=self.imgData[self.dataName] # an empty list references an empty singleton, not the list we want; adjust here
import Image
if self.xdata[current]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(self.xdata[current])
self.line.set_data(img)
except IndexError: pass
else:
# regular data
import numpy
# current==-1 avoids copy slicing data in the else part
if current==None or current==-1 or afterCurrentAlpha==1:
self.line.set_xdata(self.xdata); self.line.set_ydata(self.ydata)
self.line2.set_xdata([]); self.line2.set_ydata([])
else:
# extend the first part by one point (if data allow) so that both line segments are connected
preCurrEnd=current+(1 if len(self.xdata)>current else 0)
self.line.set_xdata(self.xdata[:preCurrEnd]); self.line.set_ydata(self.ydata[:preCurrEnd])
self.line2.set_xdata(self.xdata[current:]); self.line2.set_ydata(self.ydata[current:])
try:
x,y=self.xdata[current],self.ydata[current]
except IndexError: x,y=0,0
# this could be written in a nicer way, very likely
try:
pt=numpy.ndarray((2,),buffer=numpy.array([float(x),float(y)]))
if self.scatter:
self.scatter.set_offsets(pt)
# change rotation of the marker (possibly incorrect)
try:
dx,dy=self.xdata[current]-self.xdata[current-1],self.ydata[current]-self.ydata[current-1]
# smoothing from last n values, if possible
# FIXME: does not show arrow at all if less than window values
#try:
# window=10
# dx,dy=[numpy.average(numpy.diff(dta[current-window:current])) for dta in self.xdata,self.ydata]
#except IndexError: pass
# there must be an easier way to find on-screen derivative angle, ask on the matplotlib mailing list
axes=self.line.get_axes()
p=axes.patch; xx,yy=p.get_verts()[:,0],p.get_verts()[:,1]; size=max(xx)-min(xx),max(yy)-min(yy)
aspect=(size[1]/size[0])*(1./axes.get_data_ratio())
angle=math.atan(aspect*dy/dx)
if dx<0: angle-=math.pi
self.scatter.set_transform(matplotlib.transforms.Affine2D().rotate(angle))
except IndexError: pass
if self.annotation:
if math.isnan(x) or math.isnan(y):
if hasattr(self.annotation,'xyann'): self.annotation.xyann=(x,y)
else: self.annotation.xytext=(0,0)
self.annotation.set_text('') # make invisible, place anywhere
else:
if hasattr(self.annotation,'xyann'): self.annotation.xyann=(x,y) # newer MPL versions (>=1.4)
else: self.annotation.xytext=(x,y) # older MPL versions
self.annotation.set_text(self.annotation.annotateFmt.format(xy=(float(x),float(y))))
except TypeError: pass # this happens at i386 with empty data, saying TypeError: buffer is too small for requested array
liveTimeStamp=0 # timestamp when live update was started, so that the old thread knows to stop if that changes
nan=float('nan')
def createPlots(P,subPlots=True,noShow=False,replace=True,scatterSize=60,wider=False):
'''Create plots based on current data;
:param subPlots: show all plots in one figure as subplots; otherwise, create multiple figures
:param noShow: use headless backend for plots, and do not show plots on the screen
:param replace: if True, close existing figures and rebuild P.currLineRefs from the new plots; if False, leave existing figures and line references untouched
'''
import logging
data,imgData,plots,labels,xylabels,legendLoc,axesWd,annotateFmt=P.data,P.imgData,P.plots,P.labels,P.xylabels,P.legendLoc,P.axesWd,P.annotateFmt
if replace:
if P.currLineRefs:
logging.info('Closing existing figures')
ff=set([l.line.get_axes().get_figure() for l in P.currLineRefs]) # get all current figures
for f in ff: pylab.close(f) # close those
P.currLineRefs=[]
figs=[]
if len(plots)==0: return # nothing to plot
if subPlots:
# compute number of rows and colums for plots we have
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
if wider: subRows,subCols=subCols,subRows
# create a new figure; called once with subPlots, for each subplot without subPlots
def _newFig():
## pylab API
if not noShow: return pylab.figure() # this will go onto the screen; the pylab call sets up the windows as well
else: # with noShow
fig=matplotlib.figure.Figure()
canvas=_HeadlessFigureCanvas(fig) #
return fig
if subPlots: figs=[_newFig()]
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if not subPlots:
figs.append(_newFig())
axes=figs[-1].add_subplot(1,1,1)
else: axes=figs[-1].add_subplot(subRows,subCols,nPlot+1) # nPlot is 1-based in mpl, for matlab compatibility
axes.grid(True)
if plots[p]==None: # image plot
if not pStrip in imgData.keys(): imgData[pStrip]=[]
# fake (empty) image if no data yet
import Image
if len(imgData[pStrip])==0 or imgData[pStrip][-1]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(imgData[pStrip][-1])
img=axes.imshow(img,origin='upper')
if replace: P.currLineRefs.append(LineRef(line=img,scatter=None,annotation=None,line2=None,xdata=imgData[pStrip],ydata=None,imgData=imgData,dataName=pStrip))
axes.set_axis_off()
continue
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
plots_p_y1,plots_p_y2=[],[]; y1=True
missing=set() # missing data columns
if pStrip not in data.keys(): missing.add(pStrip.decode('utf-8','ignore'))
for d in plots_p:
if d[0]==None:
y1=False; continue
if not isinstance(d[0],(str,unicode)): raise ValueError('Plots specifiers must be strings (not %s)'%(type(d[0]).__name__))
if y1: plots_p_y1.append(d)
else: plots_p_y2.append(d)
try:
if (
d[0] not in data.keys()
# and not callable(d[0])
and not (isinstance(d[0],(str,unicode)) and (d[0].startswith('**') or d[0].startswith('*'))) # hack for callable as strings
# and not hasattr(d[0],'keys')
):
missing.add(d[0])
except UnicodeEncodeError:
import warnings
warnings.warn('UnicodeEncodeError when processing data set '+repr(d[0]))
if missing:
if len(data.keys())==0 or len(data[data.keys()[0]])==0: # no data at all yet, do not add garbage NaNs
for m in missing: data[m]=[]
else:
addDataColumns(data,missing)
try:
print 'Missing columns in Scene.plot.data, added NaNs:',', '.join([m.encode('utf-8') for m in missing])
except UnicodeDecodeError:
import warnings
warnings.warn('UnicodeDecodeError reporting missing data columns -- harmless, just wondering...')
def createLines(pStrip,ySpecs,axes,isY1=True,y2Exists=False):
'''Create data lines from specifications; this code is common for y1 and y2 axes;
it handles y-data specified as callables/dicts passed as string (starting with '*'/'**'), which might create additional lines when updated with liveUpdate.
'''
# save the original specifications; they will be smuggled into the axes object
# the live updater will run yNameFuncs to see if there are new lines to be added
# and will add them if necessary
yNameFuncs=set()
yNames=set()
ySpecs2=[]
for ys in ySpecs:
if not isinstance(ys[0],(str,unicode)): raise ValueError('Plot specifications must be strings (not a %s).'%type(ys[0]))
if ys[0].startswith('**') or ys[0].startswith('*'):
evEx=eval(ys[0][(2 if ys[0].startswith('**') else 1):],{'S':P.scene})
yNameFuncs.add(evEx) # add callable or dictionary
# XXX: what is ys[1]? Previously, there was no line specifier there for dicts at least
# print evEx,type(evEx), evEx.__iter__(),type(evEx.__iter__())
ySpecs2+=[(ret,ys[1]) for ret in evEx] # traverse list or dict keys
else: ySpecs2.append(ys)
if len(ySpecs2)==0:
print 'woo.plot: creating fake plot, since there are no y-data yet'
line,=axes.plot([nan],[nan])
line2,=axes.plot([nan],[nan])
if replace: P.currLineRefs.append(LineRef(line=line,scatter=None,annotation=None,line2=line2,xdata=[nan],ydata=[nan]))
# set different color series for y1 and y2 so that they are recognizable
if matplotlib.rcParams.has_key('axes.color_cycle'): matplotlib.rcParams['axes.color_cycle']='b,g,r,c,m,y,k' if not isY1 else 'm,y,k,b,g,r,c'
for d in ySpecs2:
yNames.add(d)
# should have been handled above already
#if pStrip not in data:
# print 'Missing column %s in Scene.plot.data, added NaN.'%pString
# addDataColumns(data,[pStrip])
if d[0] not in data:
print 'Missing column %s in Scene.plot.data, added NaN.'%d[0]
addDataColumns(data,[d[0]])
line,=axes.plot(data[pStrip],data[d[0]],d[1],label=xlateLabel(d[0],P.labels),**lineKw)
lineKwWithoutAlpha=dict([(k,v) for k,v in lineKw.items() if k!='alpha'])
line2,=axes.plot([],[],d[1],color=line.get_color(),alpha=afterCurrentAlpha,**lineKwWithoutAlpha)
# use (0,0) if there are no data yet
scatterPt=[0,0] if len(data[pStrip])==0 else (data[pStrip][current],data[d[0]][current])
scatterPtPos=[scatterPt[0] if not math.isnan(scatterPt[0]) else 0,scatterPt[1] if not math.isnan(scatterPt[1]) else 0]
# if current value is NaN, use zero instead
scatter=axes.scatter(scatterPtPos[0],scatterPtPos[1],s=scatterSize,color=line.get_color(),**scatterMarkerKw)
if annotateFmt:
if math.isnan(scatterPtPos[0]) or math.isnan(scatterPtPos[1]): text=''
else: text=annotateFmt.format(xy=scatterPt)
annotation=axes.annotate(text,xy=scatterPtPos,color=line.get_color(),**annotateKw)
annotation.annotateFmt=annotateFmt
else: annotation=None
if replace: P.currLineRefs.append(LineRef(line=line,scatter=scatter,annotation=annotation,line2=line2,xdata=data[pStrip],ydata=data[d[0]]))
axes=line.get_axes()
labelLoc=(legendLoc[0 if isY1 else 1] if y2Exists>0 else 'best')
l=axes.legend(loc=labelLoc)
if l:
l.get_frame().set_alpha(legendAlpha)
if hasattr(l,'draggable'): l.draggable(True)
if scientific:
axes.ticklabel_format(style='sci',scilimits=(0,0),axis='both')
# fixes scientific exponent placement for y2: https://sourceforge.net/mailarchive/forum.php?thread_name=20101223174750.GD28779%40ykcyc&forum_name=matplotlib-users
if not isY1: axes.yaxis.set_offset_position('right')
if isY1:
axes.set_ylabel((', '.join([xlateLabel(_p[0],P.labels) for _p in ySpecs2])) if p not in xylabels or not xylabels[p][1] else xylabels[p][1])
axes.set_xlabel(xlateLabel(pStrip,P.labels) if (p not in xylabels or not xylabels[p][0]) else xylabels[p][0])
else:
axes.set_ylabel((', '.join([xlateLabel(_p[0],P.labels) for _p in ySpecs2])) if (p not in xylabels or len(xylabels[p])<3 or not xylabels[p][2]) else xylabels[p][2])
# if there are callable/dict ySpecs, save them inside the axes object, so that the live updater can use those
if yNameFuncs:
axes.wooYNames,axes.wooYFuncs,axes.wooXName,axes.wooLabelLoc=yNames,yNameFuncs,pStrip,labelLoc # prepend woo to avoid clashes
if 0:
# fix missing 'show' method; this has been fixed in matplotlib already, but we need to backport that
# see https://github.com/matplotlib/matplotlib/commit/15fd0ae587a57cb1d7b69546eb359085315148c8
# don't do that for headless backend, error there is fine
fig=axes.get_figure()
if not hasattr(fig,'show'):
mgr=getattr(fig.canvas,'manager')
if mgr: fig.show=lambda *args: mgr.window.show()
createLines(pStrip,plots_p_y1,axes=axes,isY1=True,y2Exists=len(plots_p_y2)>0)
if axesWd>0:
axes.axhline(linewidth=axesWd,color='k')
axes.axvline(linewidth=axesWd,color='k')
# create y2 lines, if any
if len(plots_p_y2)>0:
axes=axes.twinx() # create the y2 axis
createLines(pStrip,plots_p_y2,axes,isY1=False,y2Exists=True)
### scene is not directly accessible from here, do it like this:
S=woo.master.scene
if S.plot==P:
if 'title' in S.tags: axes.set_title(S.tags['title'])
return figs
def liveUpdate(P,timestamp):
global liveTimeStamp
liveTimeStamp=timestamp
import sys
while True:
if not live or liveTimeStamp!=timestamp:
return
figs,axes,linesData=set(),set(),set()
data=P.data
for l in P.currLineRefs:
l.update()
figs.add(l.line.get_figure())
axes.add(l.line.get_axes())
linesData.add(id(l.ydata))
# find callables in y specifiers, create new lines if necessary
for ax in axes:
if not hasattr(ax,'wooYFuncs') or not ax.wooYFuncs: continue # not defined of empty
yy=set();
for f in ax.wooYFuncs:
if callable(f): yy.update(f())
elif hasattr(f,'keys'):
yy.update(f.keys())
else: raise ValueError("Internal error: ax.wooYFuncs items must be callables or dictionary-like objects and nothing else.")
#print 'callables y names:',yy
news=yy-ax.wooYNames
if not news: continue
for new in news:
ax.wooYNames.add(new)
if new in data.keys() and id(data[new]) in linesData: continue # do not add when reloaded and the old lines are already there
print 'woo.plot: creating new line for',new
if not new in data.keys(): data[new]=len(data[ax.wooXName])*[nan] # create data entry if necessary
#print 'data',len(data[ax.wooXName]),len(data[new]),data[ax.wooXName],data[new]
line,=ax.plot(data[ax.wooXName],data[new],label=xlateLabel(new,P.labels)) # no line specifier
line2,=ax.plot([],[],color=line.get_color(),alpha=afterCurrentAlpha)
scatterPt=(0 if len(data[ax.wooXName])==0 or math.isnan(data[ax.wooXName][current]) else data[ax.wooXName][current]),(0 if len(data[new])==0 or math.isnan(data[new][current]) else data[new][current])
scatter=ax.scatter(scatterPt[0],scatterPt[1],s=60,color=line.get_color(),**scatterMarkerKw)
if P.annotateFmt:
annotation=ax.annotate(P.annotateFmt.format(xy=scatterPt),xy=scatterPt,color=line.get_color(),**annotateKw)
annotation.annotateFmt=P.annotateFmt
else: annotation=None
P.currLineRefs.append(LineRef(line=line,scatter=scatter,annotation=annotation,line2=line2,xdata=data[ax.wooXName],ydata=data[new]))
ax.set_ylabel(ax.get_ylabel()+(', ' if ax.get_ylabel() else '')+xlateLabel(new,P.labels))
# it is possible that the legend has not yet been created
l=ax.legend(loc=ax.wooLabelLoc)
if l:
l.get_frame().set_alpha(legendAlpha)
if hasattr(l,'draggable'): l.draggable(True)
if autozoom:
for ax in axes:
try:
ax.relim() # recompute axes limits
ax.autoscale_view()
except RuntimeError: pass # happens if data are being updated and have not the same dimension at the very moment
for fig in figs:
#sys.stderr.write('*')
try:
fig.canvas.draw()
except RuntimeError: pass # happens here too
#sys.stderr.write('(')
time.sleep(liveInterval)
#sys.stderr.write(')')
def savePlotSequence(P,fileBase,stride=1,imgRatio=(5,7),title=None,titleFrames=20,lastFrames=30):
'''Save sequence of plots, each plot corresponding to one line in history. It is especially meant to be used for :obj:`woo.utils.makeVideo`.
:param stride: only consider every stride-th line of history (default creates one frame per each line)
:param title: Create title frame, where lines of title are separated with newlines (``\\n``) and optional subtitle is separated from title by double newline.
:param int titleFrames: Create this number of frames with title (by repeating its filename), determines how long the title will stand in the movie.
:param int lastFrames: Repeat the last frame this number of times, so that the movie does not end abruptly.
:return: List of filenames with consecutive frames.
'''
data,imgData,plots=P.data,P.imgData,P.plots
fig=createPlots(P,noShow=True,replace=True,subPlots=True,scatterSize=60,wider=True)[0]
sqrtFigs=math.sqrt(len(plots))
fig.set_size_inches(8*sqrtFigs,5*sqrtFigs) # better readable
fig.subplots_adjust(left=.05,right=.95,bottom=.05,top=.95) # make it more compact
if len(plots)==1 and plots[plots.keys()[0]]==None: # only pure snapshot is there
fig.set_size_inches(5,5)
fig.subplots_adjust(left=0,right=1,bottom=0,top=1)
#if not data.keys(): raise ValueError("plot.data is empty.")
pltLen=max(len(data[data.keys()[0]]) if data else 0,len(imgData[imgData.keys()[0]]) if imgData else 0)
if pltLen==0: raise ValueError("Both plot.data and plot.imgData are empty.")
global current
ret=[]
print 'Saving %d plot frames, it can take a while...'%(pltLen)
for i,n in enumerate(range(0,pltLen,stride)):
current=n
for l in P.currLineRefs: l.update()
out=fileBase+'-%03d.png'%i
fig.savefig(out)
ret.append(out)
sys.stderr.write('[%d]'%i)
if len(ret)==0: raise RuntimeError("No images created?!")
if title:
import Image
titleImgName=fileBase+'-title.png'
createTitleFrame(titleImgName,Image.open(ret[-1]).size,title)
ret=titleFrames*[titleImgName]+ret
if lastFrames>1: ret+=(lastFrames-1)*[ret[-1]]
return ret
def createTitleFrame(out,size,title,bgColor=(.8,.6,.8),fgColor='#405090',logo=None,logoPos=(20,20)):
'''Create figure with title and save to file.
:param out: file to save the result to; format is anything supported by matplotlib.
:param size: figure size (for pixel output formats), tuple of (width,height)
:param str title: title and subtitle; lines are separated by single newlines (``\n``) and subtitle (if any) is separated from the title by two consecutive newlines (``\n\n``). Oversize lines are scaled to fit the width, line spacing fits all lines.
:param color fgColor: Font color, any `color format that Matplotlib understands <http://matplotlib.org/api/colors_api.html>`__.
:param color bgColor: Background color.
:param logo: filename or file-like object to be read via `matplotlib.pyplot.imread <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imread>`__.
:param logoPos: position where to place the logo.
'''
import matplotlib, matplotlib.figure, matplotlib.mathtext
# http://stackoverflow.com/a/13714720/761090
dpi=100 # does not matter as font is specified in inches
fig=matplotlib.figure.Figure(figsize=(size[0]/dpi,size[1]/dpi),dpi=dpi,facecolor=bgColor)
canvas=_HeadlessFigureCanvas(fig)
#fig.set_facecolor('blue'); fig.patch.set_color('blue'); fig.patch.set_facecolor('blue'); fig.patch.set_alpha(None)
titSub=title.split('\n\n')
if len(titSub)==1: subtitle=''
elif len(titSub)==2: title,subtitle=titSub
else: title,subtitle=titSub[0],'\n'.join(titSub[1:])
lines=[(t,True) for t in title.split('\n')]+([(t,False) for t in subtitle.split('\n')] if subtitle else [])
nLines=len(lines); fontSizes=size[1]/10.,size[1]/16.
def writeLine(text,vertPos,fontsize):
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize,dpi=fig.get_dpi(),color=fgColor)
textsize=rgba.shape[1],rgba.shape[0]
if textsize[0]>size[0]:
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize*size[0]/textsize[0],dpi=fig.get_dpi(),color=fgColor)
textsize=rgba.shape[1],rgba.shape[0]
fig.figimage(rgba.astype(float)/255.,xo=(size[0]-textsize[0])/2.,yo=vertPos-depth)
nTitle,nSubtitle=len(title.split('\n')),len(subtitle.split('\n')) if subtitle else 0
nLines=nTitle+nSubtitle
ht=size[1]; y0=ht-2*fontSizes[0]; yStep=(ht-2.5*fontSizes[0])/(nTitle+.6*nSubtitle+(.5 if nSubtitle else 0))
def lineYOffset(lineno):
# an extra .5*yStep gap separates the title block from the subtitle block
return nTitle*yStep+.5*yStep+(lineno-nTitle)*.6*yStep if lineno>=nTitle else lineno*yStep
if logo:
import matplotlib.pylab
logoData=pylab.imread(logo)
fig.figimage(logoData,xo=logoPos[0],yo=logoPos[1],origin='upper')
for i,(l,isTitle) in enumerate(lines):
writeLine(l,y0-lineYOffset(i),fontSizes[0 if isTitle else 1])
# http://stackoverflow.com/a/4805178/761090 - savefig default overrides facecolor set previously
fig.savefig(out,facecolor=fig.get_facecolor())
def Scene_plot_plot(P,noShow=False,subPlots=True):
"""Do the actual plot, which is either shown on screen (and nothing is returned: if *noShow* is ``False``) or, if *noShow* is ``True``, returned list of matplotlib's Figure objects.
You can use
>>> import woo,woo.core,os
>>> S=woo.core.Scene()
>>> S.plot.plots={'foo':('bar',)}
>>> S.plot.addData(foo=1,bar=2)
>>> somePdf=woo.master.tmpFilename()+'.pdf'
>>> S.plot.plot(noShow=True)[0].savefig(somePdf)
>>> os.path.exists(somePdf)
True
to save the figure to file automatically.
"""
figs=createPlots(P,subPlots=subPlots,noShow=noShow,replace=(False if noShow else True))
# figs=set([l.line.get_axes().get_figure() for l in P.currLineRefs])
if not figs:
import warnings
warnings.warn('Nothing to plot.')
return
if not hasattr(list(figs)[0],'show') and not noShow:
import warnings
warnings.warn('plot.plot not showing figure (matplotlib using headless backend?)')
noShow=True
if not noShow:
if not woo.runtime.hasDisplay: return # would error out with some backends, such as Agg used in batches
if 1:
if live:
import threading
t=threading.Thread(target=liveUpdate,args=(P,time.time()))
t.daemon=True
t.start()
# pylab.show() # this blocks for some reason; call show on figures directly
for f in figs:
f.show()
# should have fixed https://bugs.launchpad.net/woo/+bug/606220, but does not work apparently
if 0:
import matplotlib.backend_bases
if 'CloseEvent' in dir(matplotlib.backend_bases):
def closeFigureCallback(event):
ff=event.canvas.figure
# remove closed axes from our update list
P.currLineRefs=[l for l in P.currLineRefs if l.line.get_axes().get_figure()!=ff]
f.canvas.mpl_connect('close_event',closeFigureCallback)
# else:
# figs=list(set([l.line.get_axes().get_figure() for l in P.currLineRefs]))
return figs
def Scene_plot_saveDataTxt(P,fileName,vars=None):
"""Save plot data into a (optionally compressed) text file. The first line contains a comment (starting with ``#``) giving variable name for each of the columns. This format is suitable for being loaded for further processing (outside woo) with ``numpy.genfromtxt`` function, which recognizes those variable names (creating numpy array with named entries) and handles decompression transparently.
>>> import woo, woo.core
>>> from pprint import pprint
>>> S=woo.core.Scene()
>>> S.plot.addData(a=1,b=11,c=21,d=31) # add some data here
>>> S.plot.addData(a=2,b=12,c=22,d=32)
>>> pprint(S.plot.data)
{'a': [1, 2], 'b': [11, 12], 'c': [21, 22], 'd': [31, 32]}
>>> txt=woo.master.tmpFilename()+'.txt.bz2'
>>> S.plot.saveDataTxt(txt,vars=('a','b','c'))
>>> import numpy
>>> d=numpy.genfromtxt(txt,dtype=None,names=True)
>>> d['a']
array([1, 2])
>>> d['b']
array([11, 12])
:param fileName: file to save data to; if it ends with ``.bz2`` / ``.gz``, the file will be compressed using bzip2 / gzip.
:param vars: Sequence (tuple/list/set) of variable names to be saved. If ``None`` (default), all variables in :obj:`woo.core.Plot` are saved.
"""
import bz2,gzip
data=P.data
if not vars:
vars=data.keys(); vars.sort()
fileName=P.scene.expandTags(fileName)
if fileName.endswith('.bz2'): f=bz2.BZ2File(fileName,'wb')
elif fileName.endswith('.gz'): f=gzip.GzipFile(fileName,'wb')
else: f=open(fileName,'wb')
f.write(_bytes("# "+"\t".join(vars)+"\n"))
for i in range(len(data[vars[0]])):
f.write(_bytes("\t".join([str(data[var][i]) for var in vars])+"\n"))
f.close()
def savePylab(baseName,timestamp=False,title=None):
'''This function is not finished, do not use it.'''
import time
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
saveDataTxt(fileName=baseName+'.data.bz2')
if len(plots)==0: raise RuntimeError("No plots to save, only data saved.")
py=file(baseName+'.py','w')
py.write('#!/usr/bin/env python\n# encoding: utf-8\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\nimport pylab, numpy\n')
py.write("data=numpy.genfromtxt('%s.data.bz2',dtype=None,names=True)\n"%baseName)
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue # image plots, which is not exported
if len(plots)==1: py.write('pylab.figure()\n')
else: py.write('pylab.subplot(%d,%d,%d)\n'%(subRows,subCols,nPlot+1))
def _mkTimestamp():
import time
return time.strftime('_%Y%m%d_%H:%M')
def Scene_plot_saveGnuplot(P,baseName,term='wxt',extension=None,timestamp=False,comment=None,title=None,varData=False,timeStamp=True):
"""Save data added with :obj:`woo.plot.addData` into (compressed) file and create .gnuplot file that attempts to mimick plots specified with :obj:`woo.plot.plots`.
:param baseName: used for creating baseName.gnuplot (command file for gnuplot), associated ``baseName.data.bz2`` (data) and output files (if applicable) in the form ``baseName.[plot number].extension``
:param term: specify the gnuplot terminal; defaults to ``wxt``, in which case gnuplot will draw persistent windows to screen and terminate; other useful terminals are ``png``, ``cairopdf`` and so on
:param extension: extension for output files; defaults to the terminal name, which is fine for e.g. ``png``; if you use ``cairopdf``, you should also say ``extension='pdf'``
:param bool timestamp: append numeric time to the basename
:param bool varData: whether the file to plot will be declared as a variable or be in-place in the plot expression
:param comment: a user comment (may be multiline) that will be embedded in the control file
:param title: optional plot title passed to gnuplot's ``set title``
:param bool timeStamp: write the creation time as a comment into the generated file
:return: name of the gnuplot file created.
"""
data,imgData,plots,labels,xylabels=P.data,P.imgData,P.plots,P.labels,P.xylabels
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
vars=data.keys(); vars.sort()
P.saveDataTxt(fileName=baseName+'.data.bz2',vars=vars)
fPlot=file(baseName+".gnuplot",'w')
fPlot.write('#!/usr/bin/env gnuplot\n#\n')
if timeStamp: fPlot.write('# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\n')
if comment: fPlot.write('# '+comment.replace('\n','\n# ')+'#\n')
dataFile='"< bzcat %s.data.bz2"'%(baseNameNoPath)
if varData:
fPlot.write('dataFile=%s'%dataFile); dataFile='dataFile'
if not extension: extension=term
i=0
for p in plots:
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue ## this plot is image plot, which is not applicable to gnuplot
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
if term in ['wxt','x11']: fPlot.write("set term %s %d persist\n"%(term,i))
else: fPlot.write("set term %s; set output '%s.%d.%s'\n"%(term,baseNameNoPath,i,extension))
fPlot.write("set xlabel '%s'\n"%xlateLabel(p,labels))
fPlot.write("set grid\n")
fPlot.write("set datafile missing 'nan'\n")
if title: fPlot.write("set title '%s'\n"%title)
y1=True; plots_y1,plots_y2=[],[]
# replace callable/dict-like data specifiers by the results, if that particular data exists
plots_p2=[]
for pp in plots_p:
if pp[0]==None: plots_p2.append((pp[0],pp[1]))
elif pp[0].startswith('**'):
try:
dd=eval(pp[0][2:],{'S':P.scene})
plots_p2+=[(ppp,'') for ppp in dd.keys() if ppp in data.keys()]
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating expression "'+pp[0][2:]+'".'
elif pp[0].startswith('*'):
plots_p2+=[(e,'') for e in eval(pp[0][1:],{'S':P.scene}) if e in data.keys()]
else: plots_p2.append((pp[0],pp[1]))
plots_p=plots_p2
#plots_p=sum([([(pp,'') for pp in p[0]() if pp in data.keys()] if callable(p[0]) else [(p[0],p[1])] ) for p in plots_p],[])
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_y1.append(d)
else: plots_y2.append(d)
fPlot.write("set ylabel '%s'\n"%(','.join([xlateLabel(_p[0],labels) for _p in plots_y1])))
if len(plots_y2)>0:
fPlot.write("set y2label '%s'\n"%(','.join([xlateLabel(_p[0],labels) for _p in plots_y2])))
fPlot.write("set y2tics\n")
ppp=[]
def _mkLine(varX,varY,i):
return " %s using %d:%d title '%s%s(%s)%s' with lines%s"%(dataFile,vars.index(varX)+1,vars.index(varY)+1,'← ' if i==0 else'',xlateLabel(varY,labels),xlateLabel(varX,labels),' →' if i==1 else '',' axes x1y2' if i==1 else '')
for pp in plots_y1: ppp.append(_mkLine(pStrip,pp[0],0))
for pp in plots_y2: ppp.append(_mkLine(pStrip,pp[0],1))
fPlot.write("plot "+",".join(ppp)+"\n")
i+=1
fPlot.close()
return baseName+'.gnuplot'
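# A minimal usage sketch for the exporter above (hypothetical scene setup;
# assumes the monkey-patched Plot.saveGnuplot installed by defMonkeyMethods
# below and a system where gnuplot is available):
#
# S=woo.core.Scene()
# S.plot.plots={'a':('b',)}
# S.plot.addData(a=1,b=11); S.plot.addData(a=2,b=12)
# gp=S.plot.saveGnuplot(woo.master.tmpFilename(),term='png')
# # gp names the generated .gnuplot command file; the data were written to
# # the associated .data.bz2 file next to it.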
def _deprecPlotFunc(old,func,new=None,takesScene=False,*args,**kw):
"Wrapper for deprecated functions, example below."
import warnings
if not new: new=old
warnings.warn('Function plot.%s is deprecated, use %s.%s instead.'%(old,('Scene' if takesScene else 'Scene.plot'),new),stacklevel=3,category=DeprecationWarning)
S=woo.master.scene
if takesScene: return func(S,*args,**kw)
else: return func(S.plot,*args,**kw)
#
# DEPRECATED functions, will be removed at some point!
#
def reset(): _deprecPlotFunc('reset',Scene_plot_reset)
def resetData(): _deprecPlotFunc('resetData',Scene_plot_resetData)
def splitData(): _deprecPlotFunc('splitData',Scene_plot_splitData)
def reverseData(): _deprecPlotFunc('reverseData',Scene_plot_reverseData)
def addAutoData(): _deprecPlotFunc('addAutoData',Scene_plot_autoData,new='autoData')
def addData(): _deprecPlotFunc('addData',Scene_plot_addData)
def addImgData(): _deprecPlotFunc('addImgData',Scene_plot_addImgData)
def saveGnuplot(): _deprecPlotFunc('saveGnuplot',Scene_plot_saveGnuplot)
def saveDataTxt(): _deprecPlotFunc('saveDataTxt',Scene_plot_saveDataTxt)
def plot(): _deprecPlotFunc('plot',Scene_plot_plot)
# called at startup from woo._monkey.plot
def defMonkeyMethods():
import woo.core
woo.core.Plot.reset=Scene_plot_reset
woo.core.Plot.resetData=Scene_plot_resetData
woo.core.Plot.splitData=Scene_plot_splitData
woo.core.Plot.reverseData=Scene_plot_reverseData
woo.core.Plot.autoData=Scene_plot_autoData
woo.core.Plot.addData=Scene_plot_addData
woo.core.Plot.addImgData=Scene_plot_addImgData
woo.core.Plot.saveGnuplot=Scene_plot_saveGnuplot
woo.core.Plot.saveDataTxt=Scene_plot_saveDataTxt
woo.core.Plot.plot=Scene_plot_plot
defMonkeyMethods()
| gpl-2.0 |
gurkirt/actNet-inAct | processing/detection-fusion-working.py | 1 | 32723 | '''
Author: Gurkirt Singh
Start date: 15th May 2016
Purpose of this file is to read frame-level predictions and process them to produce a label per video
'''
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pickle
import os,h5py
import time,json
import scipy.io as sio
import copy
#import pylab as plt
#######baseDir = "/mnt/sun-alpha/actnet/";
baseDir = "/data/shared/solar-machines/actnet/";
baseDir = "/data/shared/solar-machines/actnet/";
########imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
######## imgDir = "/mnt/DATADISK2/ss-workspace/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"
def readannos():
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
return actionIDs,taxonomy,database
def getnames():
fname = baseDir+'data/lists/gtnames.list'
with open(fname,'rb') as f:
lines = f.readlines()
names = []
for name in lines:
name = name.rstrip('\n')
names.append(name)
# print names
return names
def gettopklabel(preds,k,classtopk):
scores = np.zeros(200)
topk = min(classtopk,np.shape(preds)[1]);
for i in range(200):
values = preds[i,:];
values = np.sort(values);
values = values[::-1]
scores[i] = np.mean(values[:topk])
# print scores
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
return sortedlabel[:k],sortedscores[:k]
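# Illustration of the pooling above (numbers hypothetical): if a class has
# per-frame scores [0.9, 0.1, 0.8, 0.2] and classtopk=2, its video-level
# score is mean([0.9, 0.8]) = 0.85; classes are then ranked by this score
# and the top k labels with their scores are returned.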
def gettopklabel4mp(scores,k):
scores = scores - np.min(scores);
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
ss = sortedscores[:20]
ss = ss/np.sum(ss)
ss = ss[:5]
ss = ss/np.sum(ss)
return sortedlabel[:k],ss[:k]
def sumfuse(mbh,ims,k):
mbh = mbh - np.min(mbh)+1.0;
ims = ims - np.min(ims)+1.0;
# mbh = mbh/np.sum(mbh)
# ims = ims/np.sum(ims)
scores = mbh*ims;
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
ss = sortedscores[:5]
ss = ss/np.sum(ss)
return sortedlabel[:k],ss[:k]
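# Note: despite its name, sumfuse combines the two modalities
# multiplicatively (after shifting each score vector to be positive),
# whereas wAPfuse below reweights each class by (1 + per-class AP weight),
# shifts, and then adds the two score vectors.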
def wAPfuse(mbh,ims,wmbh,wims,k):
for i in range(200):
mbh[i] = (1+wmbh[i])*mbh[i]
ims[i] = (1+wims[i])*ims[i]
mbh = mbh - np.min(mbh)+1;
ims = ims - np.min(ims)+1;
# mbh = mbh/np.sum(mbh)
# ims = ims/np.sum(ims)
scores = mbh + ims;
# scores = np.mean(wmbh)*mbh+np.mean(wims)*ims;
# scores = np.zeros(200)
# for i in range(200):
# scores[i] = (mbh[i]*wmbh[i]+wims[i]*ims[i])/(wmbh[i]+wims[i]+1);
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
ss = sortedscores[:5]
ss = ss/np.sum(ss)
return sortedlabel[:k],ss[:k]
def getsegmentswithcls(preds,alpha=5):
labels,scores = gettopklabel(preds,10,200)
labels,scores = refineCalssification(labels,scores)
#(p,D) = dpEM(preds,alpha)
#labels,starts,ends = getLabels(p)
starts,ends = getfullLength(labels,np.shape(preds)[1])
# print 'Number of segments generated are ',np.shape(labels)
#scores = getscores(D,starts,ends,labels)
#labels,scores,starts,ends = removeBackground(labels,scores,starts,ends)
return labels,scores,starts,ends
def getfullLength(labels,length):
starts=[];ends=[]
offset = int(length*0.15)
for i in range(len(labels)):
starts.append(offset);
ends.append(length-offset)
return np.asarray(starts),np.asarray(ends)
def fuseThree(mbh,ims,c3d,numf,k):
mbh = mbh - np.min(mbh)+1;
ims = ims - np.min(ims)+1;
scores = mbh*ims*c3d;
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
sortedscores = scores[sortedlabel]
ss = sortedscores
ss = ss/np.sum(ss[:5])
labels,scores = sortedlabel[:k],ss[:k]
starts,ends = getfullLength(labels,numf)
return labels,scores,starts,ends
def dpEM(M,alpha):
(r,c) = np.shape(M);
D = np.zeros((r, c+1)) # add an extra column
D[:,0] = 1 # put the maximum cost in the extra first column
D[:, 1:(c+1)] = M;
# v = np.ones(r)*alpha;
phi = np.zeros((r,c))
# pdb.set_trace()
for j in xrange(1,c):
for i in xrange(r):
# values.index(min(values))
v1 = np.ones(r)*alpha
v1[i] = 0;
values= D[:, j-1]+v1
tb = np.argmin(values)
dmax = min(values)
D[i,j] = D[i,j]+dmax;
phi[i,j] = tb;
# pdb.set_trace()
q = c-1;
values= D[:, c-1]
p = np.argmin(values)
i = p
j = q
ps = np.zeros(c)
ps[q] = p
while j>0:
tb = phi[i,j];
j = int(j-1);
q = j;
ps[q] = tb;
i = int(tb);
D = D[:,1:];
return (ps,D)
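# A worked toy example of dpEM (values hypothetical): rows of M are labels,
# columns are frames, M[i,j] is the cost of assigning label i to frame j,
# and alpha is the penalty for switching labels between consecutive frames.
#
# M = np.array([[0.1, 0.9, 0.9],
#               [0.9, 0.1, 0.1]])
# ps, D = dpEM(M, 0.2)
# # ps -> array([ 0., 0., 1.]): the path starts on the cheap label 0 and
# # switches to label 1 once its lower cost outweighs the penalty alpha.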
def getLabels(p):
starts = np.zeros(500);
ends = np.zeros(500);
labels = np.zeros(500,dtype='int32');
fl = 0
i=0
starts[i]=0
fl = p[0]
labels[i] = p[0]
# print p[0]
# pdb.set_trace()
for ii in range(len(p)):
if abs(p[ii] -fl)>0:
ends[i]=ii-1
fl = p[ii]
i+=1
starts[i]=ii
labels[i] = fl
ends[i] = len(p)-1
# print i, starts[:i+1],ends[:i+1],labels[:i+1]
return labels[:i+1],starts[:i+1],ends[:i+1]
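# Example of the run-length decoding above (path values hypothetical):
# getLabels(np.array([0, 0, 1, 1, 1, 0])) returns labels [0, 1, 0],
# starts [0, 2, 5] and ends [1, 4, 5], i.e. one (label, start, end)
# triple per constant run of the path produced by dpEM.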
def getSegment4mAlphaEXT(topklabels,topscors,clscores,predEXT,C3Dfeat,fps,numf,alpha):
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:5])
for label in topklabels[:1]:
clScore = clscores[label]/norms;
colScore = predEXT[:,label];
colScore = colScore/np.max(colScore)
M = np.zeros((2,len(colScore)))
M[1,:] = 1-colScore
M[0,:] = colScore
# print M
scs = [.5,.6,.7,.8,.9,.1]
offsetA = 0;
while len(scs)>2:
(p,D) = dpEM(M,alpha+offsetA)
# print p
ls,ss,eds = getLabels(p)
scs,ss,eds = refinelabels(ls,ss,eds,colScore)
offsetA+=1
if len(scs)>0:
for ind in range(len(scs)):
labels.append(label)
scores.append(scs[ind])
starts.append(ss[ind])
ends.append(eds[ind])
else:
labels.append(label)
# scores.append(clScore)
scols = sorted(colScore)
scols = scols[::-1]
scores.append(np.mean(scols[:min(len(colScore),200)]))
starts.append(int(len(colScore)*0.10))
ends.append(int(len(colScore) - len(colScore)*0.10))
return labels,scores,starts,ends
def getSegmentBinaryC3D(topklabels,topscors,clscores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha):
# topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha
indexs = np.asarray(C3Dfeat['indexs']);
frameLabels = np.asarray(C3Dfeat['labels']);
preds = np.asarray(C3Dfeat['scores']);
c3numf = np.shape(preds)[0];
preds = preds - np.min(preds);
# predsEXT = predEXT['scores']
# sio.savemat('data.mat',mdict= {'indexs':indexs,'topklabels':topklabels,'topscors':topscors,'clscores':clscores,'preds':preds,'numf':numf,'fps':fps,'frameLabels':frameLabels,'predEXT':predEXT})
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:2])
topscors = topscors/norms;
lcount = 0;
binSVM = smoothit(np.asarray(C3DfeatbinSVM['scores']))
binRFcopy = np.asarray(C3DfeatbinRF['scores'])
for label in topklabels[:15]:
binRF = copy.deepcopy(binRFcopy[:,1])
clScore = topscors[lcount];
colScore = preds[:,label]
lcount +=1
# colScoreSmoothed = smoothColScores(colScore,10)
# binRF = colScoreSmoothed;
# binRF = smoothit(binRF);
#binRF = binRF-np.mean(binRF);
# sortedScores = sorted(binRF)
# offset = int(c3numf*0.06);
# minS = np.mean(sortedScores[:offset])
# sortedScores = sortedScores[:-1];
# print sortedScores
# maxS = np.mean(sortedScores[c3numf-int(1.5*offset):])
# print minS,maxS
# binRF = (binRF-minS)/(maxS-minS)
# extColScore = predEXT[indexs,label]
# binRF[extColScore>0.8] = 0.95;
# binRF[binSVM>0.65] = 0.85;
# binRF[binSVM>0.8] = 0.99;
# binRF[binSVM<-0.4] = 0.1;
# binRF[binSVM<-0.85] = 0.00;
# binRF[colScoreSmoothed>0.6] = 0.9;
# binRF[colScoreSmoothed<0.1] = 0.0;
# else:
# binRF = binRF-minS
# binRF = newScores;
# print 'saving it'
# sio.savemat('colScoreSmoothed.mat',mdict = {'binSVM':binSVM,'binRF':binRF,'frameLabels':frameLabels});
# sio.savemat('colScoreSmoothed.mat',mdict = {'binRF':binRF,'frameLabels':frameLabels});
# colScoreSmoothed = binRF[:,1]
# M = np.transpose(binRF);
# print M
M = np.zeros((2,c3numf))
M[0,:] = 1-binRF
M[1,:] = binRF
# # print M
ls = [1,2,3,4,5,6,7,8,9,10,11]
# talpha = alpha;
# while len(ls)>7:
(p,D) = dpEM(M,alpha)
ls,ss,eds = getLabels(p)
# talpha += 0.2
scs,ss,eds = refinelabels(ls,ss,eds,binRF)
# print scs,ss,eds
if len(scs)>0:
for ind in range(len(scs)):
labels.append(label)
scores.append(clScore)
starts.append(ss[ind])
ends.append(eds[ind])
else:
# error('we have problem')
# else:
labels.append(label)
# scols = sorted(binRF)
# scols = scols[::-1]
# seglen = min(int(len(scols)*0.6),30)
# scores.append(np.mean(scols[:seglen])*clScore)
scores.append(clScore)
# scores.append(clScore)
starts.append(int(c3numf*0.12))
ends.append(int(c3numf - c3numf*0.12))
# st = int(segInit[sInd]*t2f)
# et = int(segEnd[sInd]*t2f)
# bscore = clScore*np.mean(colScore[st:et])*pscores[sInd]
# # print st,et,bscore,np.mean(colScore[st:et]),pscores[sInd],clScore
#
# if bscore>0.01:
# labels.append(label); scores.append(bscore);
# starts.append(segInit[sInd]*fps);
# ends.append(segEnd[sInd]*fps);
return labels,scores,starts,ends,c3numf
def getSegment4mAlphaC3D(topklabels,topscors,clscores,predEXT,C3Dfeat,fps,numf,alpha):
indexs = np.asarray(C3Dfeat['indexs']);
frameLabels = np.asarray(C3Dfeat['labels']);
preds = np.asarray(C3Dfeat['scores']);
c3numf = np.shape(preds)[0];
preds = preds - np.min(preds);
sio.savemat('data.mat',mdict= {'indexs':indexs,'topklabels':topklabels,'topscors':topscors,'clscores':clscores,'preds':preds,'numf':numf,'fps':fps,'frameLabels':frameLabels,'predEXT':predEXT})
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:5])
topscors = topscors/norms;
lcount = 0;
for label in topklabels[:1]:
clScore = topscors[lcount];
lcount +=1
colScore = preds[:,label]/norms;
colScoreSmoothed = smoothColScores(colScore,10)
sio.savemat('colScoreSmoothed.mat',mdict = {'colScoreSmoothed':colScoreSmoothed,'colScore':colScore});
M = np.zeros((2,len(colScoreSmoothed)))
M[1,:] = 1-colScoreSmoothed
M[0,:] = colScoreSmoothed
# print M
(p,D) = dpEM(M,alpha)
# print p
ls,ss,eds = getLabels(p)
# print p
# print ls,ss,eds
scs,ss,eds = refinelabels(ls,ss,eds,colScoreSmoothed)
# print scs,ss,eds
if len(scs)>0:
for ind in range(len(scs)):
labels.append(label)
scores.append(scs[ind]*clScore)
starts.append(ss[ind])
ends.append(eds[ind])
# else:
labels.append(label)
scols = sorted(colScoreSmoothed)
scols = scols[::-1]
seglen = min(int(len(scols)*0.5),30)
scores.append(np.mean(scols[:seglen])*clScore)
# scores.append(clScore)
starts.append(int(c3numf*0.10))
ends.append(int(c3numf - c3numf*0.10))
# st = int(segInit[sInd]*t2f)
# et = int(segEnd[sInd]*t2f)
# bscore = clScore*np.mean(colScore[st:et])*pscores[sInd]
# # print st,et,bscore,np.mean(colScore[st:et]),pscores[sInd],clScore
#
# if bscore>0.01:
# labels.append(label); scores.append(bscore);
# starts.append(segInit[sInd]*fps);
# ends.append(segEnd[sInd]*fps);
return labels,scores,starts,ends,c3numf
def getBinaryAccuracy(topklabels,topscors,clscores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha):
# topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha
indexs = np.asarray(C3Dfeat['indexs']);
frameLabels = np.asarray(C3DfeatbinRF['labels']);
preds = np.asarray(C3Dfeat['scores']);
c3numf = np.shape(preds)[0];
preds = preds - np.min(preds);
# predsEXT = predEXT['scores']
# sio.savemat('data.mat',mdict= {'indexs':indexs,'topklabels':topklabels,'topscors':topscors,'clscores':clscores,'preds':preds,'numf':numf,'fps':fps,'frameLabels':frameLabels,'predEXT':predEXT})
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
# labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:2])
topscors = topscors/norms;
lcount = 0;
binSVM = smoothit(np.asarray(C3DfeatbinSVM['scores']),5)
binRF = np.asarray(C3DfeatbinRF['scores'])
# print ' shapes ', np.shape(binRF),np.shape(binSVM)
binRF = smoothit(binRF[:,1],5)
# print ' shapes ', np.shape(binRF)
label =topklabels[0]
clScore = topscors[lcount];
colScore = preds[:,label]
lcount +=1
colScoreSmoothed = smoothColScores(colScore,6)
extColScore = predEXT[indexs,label]
accEXT = 1.0; accbinSVM = 1.0; accbinRF = 1.0; accC3D = 1.0;
extth = 0.8; binSVMth = 0.8; binRFth = 0.8; c3dth = 0.8;
countrf = 1.0; countext = 1.0;
countc3 = 1.0; countBsvm = 1.0;
# binRF[binSVM<-0.1] = 0.05;
# binRF[extColScore>0.6] = 0.85;
for i in range(c3numf):
if extColScore[i]>=extth:
if frameLabels[i]<200:
accEXT+=1
countext = countext+1
if binRF[i]>=binRFth:
if frameLabels[i]<200:
accbinRF+=1
countrf = countrf+1
if binSVM[i]>=binSVMth:
if frameLabels[i]<200:
accbinSVM+=1
countBsvm = countBsvm+1
if colScoreSmoothed[i]>=c3dth:
if frameLabels[i]<200:
accC3D+=1
countc3 = countc3+1
# if (colScoreSmoothed[i]<c3dth and frameLabels[i]==200):
# accC3D+=1
# if (binRF[i]<binRFth and frameLabels[i]==200):
# accbinRF+=1
# if binSVM[i]<binSVMth and frameLabels[i]==200:
# accbinSVM+=1;
# if frameLabels[i]==200:
# count+=1
# for i in range(c3numf):
# if (extColScore[i]>=extth and frameLabels[i]<200) or (extColScore[i]<extth and frameLabels[i]==200):
# accEXT+=1
# if (colScoreSmoothed[i]>=c3dth and frameLabels[i]<200) or (colScoreSmoothed[i]<c3dth and frameLabels[i]==200):
# accC3D+=1
# if (binRF[i]>=binRFth and frameLabels[i]<200) or (binRF[i]<binRFth and frameLabels[i]==200):
# accbinRF+=1
# if (binSVM[i]>=binSVMth and frameLabels[i]<200) or (binSVM[i]<binSVMth and frameLabels[i]==200):
# accbinSVM+=1;
# if frameLabels[i]<201:
# count+=1
print np.asarray([accEXT/countext,accbinRF/countrf,accbinSVM/countBsvm,accC3D/countc3])
return accEXT/countext,accbinRF/countrf,accbinSVM/countBsvm,accC3D/countc3,1
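# The four ratios returned above are precision-style scores: for each
# stream (EXT, binary RF, binary SVM, smoothed C3D), among the frames whose
# score clears that stream's 0.8 threshold, the fraction whose ground-truth
# frame label is an action class (< 200; label 200 denotes background).
# Numerators and denominators start at 1.0 to avoid division by zero.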
def smoothit(colScore,hws=5):
if len(colScore)<hws:
colScore = colScore/np.max(colScore)
return colScore #hws = int(len(colScore)/2);
newScores = np.zeros_like(colScore)
numelm = len(colScore)
for i in range(numelm):
ts = 0;count = 0;
for k in np.arange(max(i-hws,0),min(numelm,i+hws),1):
count += 1
ts += colScore[k]
if count>0:
newScores[i] = float(ts)/count
else:
newScores[i] = colScore[i]
return newScores
def smoothColScores(colScore,hws=5):
if len(colScore)<hws:
colScore = colScore/np.max(colScore)
return colScore #hws = int(len(colScore)/2);
newScores = np.zeros_like(colScore)
numelm = len(colScore)
for i in range(numelm):
ts = 0;count = 0;
for k in np.arange(max(i-hws,0),min(numelm,i+hws),1):
count += 1
ts += colScore[k]
if count>0:
newScores[i] = float(ts)/count
else:
newScores[i] = colScore[i]
sortedScores = sorted(newScores)[::-1]
minS = np.mean(sortedScores[-5:-2])
# sortedScores = sortedScores
# print sortedScores
maxS = np.mean(sortedScores[1:5])
if maxS>0:
newScores = (newScores-minS)/(maxS-minS)
else:
newScores = newScores-minS
newScores[newScores<0] = 0
# for i in range(len(newScores)):
# if newScores[i]>0.4 and newScores[i]<0.5:
# newScores[i]*=1.5
# newScores[newScores>0.8] = 1.0
return newScores
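# smoothit and smoothColScores share the same moving average: element i is
# replaced by the mean over indices max(i-hws, 0) .. min(n, i+hws)-1 (note
# the exclusive upper bound), e.g. smoothit(np.array([0., 1., 0., 1.]), 1)
# gives array([ 0., 0.5, 0.5, 0.5]). smoothColScores additionally rescales
# the smoothed curve to roughly [0, 1], with min/max estimated from the
# near-extremes of the sorted values, and clips negatives to zero.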
def refinelabels(inlabels,instarts,inends,colScore):
scores = []; starts = []; ends = [];
offset = len(colScore)*0.15;
for ind in range(len(inlabels)):
segIndexs = np.asarray(np.arange(instarts[ind],inends[ind],dtype=int))
if inlabels[ind] == 0 and len(segIndexs)>offset:
starts.append(max(offset,instarts[ind]-5));
ends.append(min(inends[ind]+5,len(colScore)-offset))
scols = sorted(colScore[segIndexs])
seglen = min(int(len(scols)*0.8),190)
scols = scols[::-1]
sc = np.mean(scols[:seglen])
scores.append(len(segIndexs))
if len(scores)>0:
ind = np.argmax(scores)
return [scores[ind]],[starts[ind]],[ends[ind]]
else:
return scores,starts,ends
def getSegment4mProp(topklabels,topscors,clscores,C3Dfeat,props,fps,numf):
pscores = props['score'];
segInit = props['segment-init'];
segEnd = props['segment-end'];
indexs = C3Dfeat['indexs'];
frameLabels = C3Dfeat['labels'];
preds = C3Dfeat['scores'];
preds = preds - np.min(preds) + 1;
c3numf = np.shape(preds)[0];
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors)
for label in topklabels[:1]:
clScore = clscores[label]/norms;
colScore = preds[:,label]/norms;
print 'number of props',len(pscores)
for sInd in range(min(len(pscores),5)):
# if pscores[sInd]>0.3:
st = int(segInit[sInd]*t2f)
et = int(segEnd[sInd]*t2f)
bscore = clScore*np.mean(colScore[st:et])*pscores[sInd]
# print st,et,bscore,np.mean(colScore[st:et]),pscores[sInd],clScore
if bscore>0.01:
labels.append(label); scores.append(bscore);
starts.append(segInit[sInd]*fps);
ends.append(segEnd[sInd]*fps);
return labels,scores,starts,ends
def getTOPclasses(mbh,ims,c3d,k):
mbh = mbh - np.min(mbh)+1;
ims = ims - np.min(ims)+1;
scores = mbh*ims*c3d;
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
sortedscores = scores[sortedlabel]
return sortedlabel[:k],sortedscores[:k],scores
def getC3dMeanPreds(preds,classtopk=80):
preds = preds - np.min(preds) + 1;
scores = np.zeros(200)
topk = min(classtopk,np.shape(preds)[0]);
# for i in range(np.shape(preds)[0]):
# preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]) ;
for i in range(200):
values = preds[:,i];
values = np.sort(values);
values = values[::-1]
scores[i] = np.mean(values[:topk])
return scores
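# getC3dMeanPreds is the frame-axis counterpart of gettopklabel above:
# preds here is (num_frames x 200), and each class score is the mean of
# that class's classtopk highest frame scores after shifting preds to be
# positive.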
def readpkl(filename):
with open(filename) as f:
data = pickle.load(f)
return data
def processThreePredictions():
#########################################
#########################################
names = getnames()
gtlabels = readpkl('{}data/labels.pkl'.format(baseDir))
indexs = readpkl('{}data/indexs.pkl'.format(baseDir))
actionIDs,taxonomy,database = readannos()
########################################
########################################
for alpha in [3,]:
K = 15;
subset = 'testing';
featType = 'IMS'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataIMS = pickle.load(f)
featType = 'MBH'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataMBH = pickle.load(f)
featType = 'C3D'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3D = h5py.File(savename,'r');
featType = 'EXT'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileEXT = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWRF-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinRF = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWSVM-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinSVM = h5py.File(savename,'r');
# savename = '{}data/activitynet_v1-3_proposals.hdf5'.format(baseDir)
# infileProp = h5py.File(savename,'r');
#
outfilename = '{}results/detection/{}-{}-K-{}-alpha-{}.json'.format(baseDir,subset,'C3D-BIN-BOOST-LONG',str(K).zfill(3),str(int(alpha*10)).zfill(3))
if True: #not os.path.isfile(outfilename):
vcount = 0;
vdata = {};
vdata['external_data'] = {'used':True, 'details':"We use ImagenetShuffle features, MBH features and C3D features provided on challenge page."}
vdata['version'] = "VERSION 1.3"
results = {}
for videoId in database.keys():
videoInfo = database[videoId]
numf = videoInfo['numf'];
duration = videoInfo['duration']
#fps = videoInfo['fps'];
fps = numf/duration;
if videoInfo['subset'] == subset:
vcount +=1
if vcount > -1:
vidresults = []
# print videoInfo
# vcount+=1
vidname = 'v_'+videoId
# print 'processing ', vidname, ' vcount ',vcount
ind = dataMBH['vIndexs'][videoId]
predsMBH = dataMBH['scores'][ind,:]
ind = dataIMS['vIndexs'][videoId]
predsIMS = dataIMS['scores'][ind,:]
C3Dfeat = infileC3D[videoId]
C3Dscores = C3Dfeat['scores']
predS3D = getC3dMeanPreds(C3Dscores)
# props = infileProp[vidname]
predEXT = np.transpose(infileEXT[videoId]['scores'])
# predEXT = getC3dMeanPreds(preds,220)
C3DfeatbinRF = infileC3DbinRF[videoId]
C3DfeatbinSVM = infileC3DbinSVM[videoId]
#print 'shape of preds',np.shape(preds)
print 'processing ', vidname, ' vcount ',vcount,' fps ',fps, ' numf ',numf,' alpha ',alpha,
# labels,scores,starts,ends = fuseThree(predsMBH,predsIMS,predS3D,numf,K)
topklabels,topscors,scores= getTOPclasses(predsMBH,predsIMS,predS3D,K)
# labels,scores,starts,ends = getSegment4mProp(topklabels,topscors,scores,C3Dfeat,props,fps,numf)
# labels,scores,starts,ends = getSegment4mAlphaEXT(topklabels,topscors,scores,predEXT,C3Dfeat,fps,numf,alpha)
labels,scores,starts,ends,c3numf = getSegmentBinaryC3D(topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha)
print ' Number of detections: ',len(labels)
# print labels,scores
fps = c3numf/duration;
for idx in range(len(labels)):
score = scores[idx]
label = labels[idx]
name = names[label]
st = float(starts[idx])/fps
et = float(ends[idx])/fps
segment = [];
segment.append(st);segment.append(et)
# print label,score,segment,starts[idx],ends[idx]
tempdict = {'label':name,'score':float(score),'segment':segment}
vidresults.append(tempdict)
results[videoId] = vidresults
vdata['results'] = results
print 'result-saved-in ',outfilename
with open(outfilename,'wb') as f:
json.dump(vdata,f)
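# Pipeline summary for processThreePredictions: video-level IMS and MBH
# predictions are loaded from pickles and the C3D, EXT and binary (RF/SVM)
# C3D streams from HDF5 files; getTOPclasses fuses MBH*IMS*C3D into a
# ranked class list, getSegmentBinaryC3D turns the binary-stream scores
# into temporal segments via dpEM, and the detections are written out as
# an ActivityNet-style results JSON.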
def getaccuracy():
#########################################
#########################################
names = getnames()
gtlabels = readpkl('{}data/labels.pkl'.format(baseDir))
indexs = readpkl('{}data/indexs.pkl'.format(baseDir))
actionIDs,taxonomy,database = readannos()
########################################
########################################
for alpha in [0.3,]:
K = 5;
subset = 'validation'; # or 'testing'
featType = 'IMS'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataIMS = pickle.load(f)
featType = 'MBH'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataMBH = pickle.load(f)
featType = 'C3D'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3D = h5py.File(savename,'r');
featType = 'EXT'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileEXT = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWRF-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinRF = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWSVM-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinSVM = h5py.File(savename,'r');
# savename = '{}data/activitynet_v1-3_proposals.hdf5'.format(baseDir)
# infileProp = h5py.File(savename,'r');
#
outfilename = '{}results/detection/{}-{}-K-{}-alpha-{}.json'.format(baseDir,subset,'C3D-BIN',str(K).zfill(3),str(int(alpha*10)).zfill(3))
accEXT = 0.0; accbinSVM = 0.0; accbinRF = 0.0; accC3D = 0.0;
count = 0;
if True: #not os.path.isfile(outfilename):
vcount = 0;
vdata = {};
vdata['external_data'] = {'used':True, 'details':"We use extraction Net model with its weights pretrained on imageNet dataset and fine tuned on Activitty Net. Plus ImagenetShuffle, MBH features, C3D features privide on challenge page"}
vdata['version'] = "VERSION 1.3"
results = {}
for videoId in database.keys():
videoInfo = database[videoId]
numf = videoInfo['numf'];
duration = videoInfo['duration']
#fps = videoInfo['fps'];
fps = numf/duration;
if videoInfo['subset'] == subset:
vcount +=1
if vcount <2000:
vidresults = []
# print videoInfo
# vcount+=1
vidname = 'v_'+videoId
# print 'processing ', vidname, ' vcount ',vcount
ind = dataMBH['vIndexs'][videoId]
predsMBH = dataMBH['scores'][ind,:]
ind = dataIMS['vIndexs'][videoId]
predsIMS = dataIMS['scores'][ind,:]
C3Dfeat = infileC3D[videoId]
C3Dscores = C3Dfeat['scores']
predS3D = getC3dMeanPreds(C3Dscores)
# props = infileProp[vidname]
predEXT = np.transpose(infileEXT[videoId]['scores'])
# predEXT = getC3dMeanPreds(preds,220)
C3DfeatbinRF = infileC3DbinRF[videoId]
C3DfeatbinSVM = infileC3DbinSVM[videoId]
#print 'shape of preds',np.shape(preds)
print 'processing ', vidname, ' vcount ',vcount,' fps ',fps, ' numf ',numf,' alpha ',alpha,
# labels,scores,starts,ends = fuseThree(predsMBH,predsIMS,predS3D,numf,K)
topklabels,topscors,scores= getTOPclasses(predsMBH,predsIMS,predS3D,K)
# labels,scores,starts,ends = getSegment4mProp(topklabels,topscors,scores,C3Dfeat,props,fps,numf)
# labels,scores,starts,ends = getSegment4mAlphaEXT(topklabels,topscors,scores,predEXT,C3Dfeat,fps,numf,alpha)
aEXT,aSVM,aRF,aC3D,cnf= getBinaryAccuracy(topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha)
accEXT += aEXT; accbinSVM += aSVM; accbinRF += aRF; accC3D += aC3D;
count +=cnf
print 'Average Accuracy is ', np.asarray([accEXT,accbinRF,accbinSVM,accC3D])/count
if __name__=="__main__":
# processOnePredictions()
# processTwoPredictions()
# fuse2withAP()
processThreePredictions()
# getaccuracy()
| mit |
oxtopus/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/_cm.py | 70 | 375423 | """
Color data and pre-defined cmap objects.
This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.
Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
_bone_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 1.0, 1.0))}
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone ', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary ', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)
datad = {
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'cool': _cool_data,
'copper': _copper_data,
'flag': _flag_data,
'gray' : _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet' : _jet_data,
'pink': _pink_data,
'prism': _prism_data,
'spring': _spring_data,
'summer': _summer_data,
'winter': _winter_data,
'spectral': _spectral_data
}
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612, 0.15294118225574493,
0.15294118225574493), (0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896, 0.3803921639919281,
0.3803921639919281), (0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.60000002384185791, 0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994, 0.91372549533843994),
(0.80000001192092896, 0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435, 0.70588237047195435), (1.0,
0.58431375026702881, 0.58431375026702881)],
'green': [(0.0, 0.0, 0.0), (0.10000000149011612,
0.18823529779911041, 0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541), (0.30000001192092896,
0.68235296010971069, 0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.60000002384185791, 0.9529411792755127, 0.9529411792755127),
(0.69999998807907104, 0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342, 0.67843139171600342),
(0.89999997615814209, 0.45882353186607361, 0.45882353186607361),
(1.0, 0.21176470816135406, 0.21176470816135406)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898, 0.84313726425170898),
(0.20000000298023224, 0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545, 0.99215686321258545),
(0.40000000596046448, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104, 0.67058825492858887,
0.67058825492858887), (0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209, 0.27058824896812439,
0.27058824896812439), (1.0, 0.19215686619281769,
0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
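# The sketch below shows how one of the segmentdata dicts above becomes a
# usable colormap. It is a minimal usage example, guarded so it only runs
# when this file is executed directly; it assumes matplotlib.colors is
# importable from here, and the name 'Blues' passed to the constructor is
# just an illustrative label, not something the API requires.
if __name__ == '__main__':
    from matplotlib.colors import LinearSegmentedColormap

    # Build a 256-entry lookup table from one of the dicts defined above.
    blues = LinearSegmentedColormap('Blues', _Blues_data, N=256)

    # A colormap is callable: a float in [0, 1] yields an (r, g, b, a) tuple.
    print(blues(0.5))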
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
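# A minimal sketch of how segmentdata dicts like the ones in this file are
# typically consumed. Each channel key ('red', 'green', 'blue') maps to a
# list of (x, y0, y1) control points with x increasing from 0.0 to 1.0;
# between two adjacent points the channel interpolates from the first
# point's y1 to the next point's y0. The tiny ramp below is a hypothetical
# stand-in for the large tables above, used only to keep the example
# self-contained; it is not one of the tables defined in this file.

def _demo_segmentdata_usage():
    from matplotlib.colors import LinearSegmentedColormap

    # Hypothetical two-point grayscale ramp in segmentdata form.
    demo_gray = {
        'red':   [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
        'green': [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
        'blue':  [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
    }
    # Build a 256-entry lookup table from the control points.
    cmap = LinearSegmentedColormap('demo_gray', demo_gray, N=256)
    # Sampling at 0.5 yields mid-gray RGBA, roughly (0.5, 0.5, 0.5, 1.0)
    # up to lookup-table rounding.
    return cmap(0.5)
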
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
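# --- Editor's note (a sketch, not part of the original matplotlib source) ---
# The dictionaries above and below are Matplotlib "segmentdata" tables: each
# channel key ('red', 'green', 'blue') maps to a sorted list of (x, y0, y1)
# entries, where x is the normalized position in [0, 1], y0 is the channel
# value approached from the left of x, and y1 the value used from the right
# (y0 != y1 yields a hard step; in these tables y0 == y1 everywhere, so the
# channels are continuous piecewise-linear ramps). A minimal, self-contained
# example of how such a table becomes a usable colormap -- the tiny 3-point
# ramp below is a made-up illustration, not data taken from this file:

from matplotlib.colors import LinearSegmentedColormap

_demo_segmentdata = {  # hypothetical ramp: black -> pure red -> white
    'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
    'green': [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)],
    'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)],
}

# N=256 samples the piecewise-linear channels into a lookup table; the large
# _gist_* tables in this file are consumed by Matplotlib in the same way.
_demo_cmap = LinearSegmentedColormap('demo_ramp', _demo_segmentdata, N=256)
# -----------------------------------------------------------------------------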
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
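
# Note (added for illustration; not part of the original module): each
# colormap table in this file follows Matplotlib's LinearSegmentedColormap
# "segmentdata" convention. For every channel ('red', 'green', 'blue') the
# value is a list of (x, y0, y1) triples sorted by x, with x in [0, 1];
# between x[i] and x[i+1] the channel is linearly interpolated from y1[i]
# to y0[i+1], so an entry with y0 != y1 produces a hard step at that x.
# In the tables here y0 == y1 throughout, i.e. the maps are continuous.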
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
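
# Usage sketch (added for illustration; not part of the original module).
# It shows how a segmentdata dict such as _gist_ncar_data above becomes a
# usable colormap via Matplotlib's public API. The __main__ guard keeps the
# module's import-time behavior unchanged; the name `gist_ncar` below is
# illustrative, not something this module defines.
if __name__ == '__main__':
    from matplotlib.colors import LinearSegmentedColormap

    # Build a 256-entry lookup table from the channel tables above.
    gist_ncar = LinearSegmentedColormap('gist_ncar', _gist_ncar_data, N=256)

    # Sampling the map at a normalized position yields an (r, g, b, a) tuple.
    print(gist_ncar(0.5))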
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
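# Note (added comment, not in the original data): the _gist_*_data tables
# in this module appear to follow matplotlib's LinearSegmentedColormap
# segment-data convention. Each channel key ('red', 'green', 'blue') maps
# to a list of (x, y0, y1) triples, where x is the normalized position in
# [0, 1] (monotonically increasing from 0.0 to 1.0), y1 is the channel
# value used for positions just above x, and y0 is the value approached
# from below. Values are interpolated linearly between anchors, and an
# anchor with y0 != y1 produces a hard step in the resulting colormap.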
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
datad['Accent'] = _Accent_data
datad['Blues'] = _Blues_data
datad['BrBG'] = _BrBG_data
datad['BuGn'] = _BuGn_data
datad['BuPu'] = _BuPu_data
datad['Dark2'] = _Dark2_data
datad['GnBu'] = _GnBu_data
datad['Greens'] = _Greens_data
datad['Greys'] = _Greys_data
datad['Oranges'] = _Oranges_data
datad['OrRd'] = _OrRd_data
datad['Paired'] = _Paired_data
datad['Pastel1'] = _Pastel1_data
datad['Pastel2'] = _Pastel2_data
datad['PiYG'] = _PiYG_data
datad['PRGn'] = _PRGn_data
datad['PuBu'] = _PuBu_data
datad['PuBuGn'] = _PuBuGn_data
datad['PuOr'] = _PuOr_data
datad['PuRd'] = _PuRd_data
datad['Purples'] = _Purples_data
datad['RdBu'] = _RdBu_data
datad['RdGy'] = _RdGy_data
datad['RdPu'] = _RdPu_data
datad['RdYlBu'] = _RdYlBu_data
datad['RdYlGn'] = _RdYlGn_data
datad['Reds'] = _Reds_data
datad['Set1'] = _Set1_data
datad['Set2'] = _Set2_data
datad['Set3'] = _Set3_data
datad['Spectral'] = _Spectral_data
datad['YlGn'] = _YlGn_data
datad['YlGnBu'] = _YlGnBu_data
datad['YlOrBr'] = _YlOrBr_data
datad['YlOrRd'] = _YlOrRd_data
datad['gist_earth'] = _gist_earth_data
datad['gist_gray'] = _gist_gray_data
datad['gist_heat'] = _gist_heat_data
datad['gist_ncar'] = _gist_ncar_data
datad['gist_rainbow'] = _gist_rainbow_data
datad['gist_stern'] = _gist_stern_data
datad['gist_yarg'] = _gist_yarg_data
# Reverse all the colormaps; each reversed map is registered under the
# original name with '_r' appended.
def revcmap(data):
    """Return a reversed copy of a segmented-colormap data dictionary."""
    data_r = {}
    for key, val in data.iteritems():  # Python 2 idiom; use items() on Python 3
        # Mirror each (x, y0, y1) anchor around x = 0.5 (x -> 1 - x) and
        # reverse the list so the anchors stay in ascending x order.
        valnew = [(1. - a, b, c) for a, b, c in reversed(val)]
        data_r[key] = valnew
    return data_r

cmapnames = datad.keys()
for cmapname in cmapnames:
    cmapname_r = cmapname + '_r'
    cmapdat_r = revcmap(datad[cmapname])
    datad[cmapname_r] = cmapdat_r
    # Expose the reversed map as a module-level attribute, e.g. Blues_r.
    locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
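# Illustrative usage sketch (added note, not part of the original module):
# after the loop above, every colormap is available in both orientations,
# e.g. the reversed gray map built from _gist_yarg_data:
#
#     import matplotlib.pyplot as plt
#     from matplotlib import cm
#     plt.imshow(some_2d_array, cmap=cm.gist_yarg_r)  # some_2d_array is a placeholder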
| gpl-3.0 |
Traecp/MCA_GUI | McaGUI_v17.py | 3 | 73468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.ndimage
from scipy import stats
from scipy.fftpack import fft, fftfreq, fftshift
import os, sys
import gc
from os import listdir
from os.path import isfile,join
import gtk
import matplotlib as mpl
import matplotlib.pyplot as plt
#mpl.use('GtkAgg')
from matplotlib.figure import Figure
#from matplotlib.axes import Subplot
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from matplotlib.cm import jet#, gist_rainbow # colormap
from matplotlib.widgets import Cursor
#from matplotlib.patches import Rectangle
from matplotlib import path
#import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import xrayutilities as xu
from lmfit import Parameters, minimize
import h5py as h5
from MCA_GUI import mca_spec as SP
__version__ = "1.1.8"
__date__ = "06/11/2014"
__author__ = "Thanh-Tra NGUYEN"
__email__ = "thanhtra0104@gmail.com"
#mpl.rcParams['font.size'] = 18.0
#mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.handletextpad'] = 0.5
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.subplot.bottom'] = 0.13
mpl.rcParams['figure.subplot.top'] = 0.93
mpl.rcParams['figure.subplot.left'] = 0.14
mpl.rcParams['figure.subplot.right'] = 0.915
mpl.rcParams['savefig.dpi'] = 300
def Fourier(X, vect):
    """Return the non-negative frequencies and the normalized FFT magnitude of vect."""
    N = vect.size    # number of data points
    T = X[1] - X[0]  # sample spacing
    TF = fft(vect)
    xf = fftfreq(N, T)
    xf = fftshift(xf)
    yplot = fftshift(TF)
    yplot = np.abs(yplot)
    # Keep only the non-negative half of the shifted spectrum.
    yplot = yplot[N/2:]  # integer division under Python 2; use N//2 on Python 3
    xf = xf[N/2:]
    return xf, yplot/yplot.max()
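# Minimal sketch (illustrative only, not called by the GUI): spectrum of a
# synthetic fringe signal; 'x_demo' and 'sig_demo' are made-up example arrays.
#
#     x_demo = np.linspace(0.0, 10.0, 512)
#     sig_demo = np.cos(2*np.pi*3.0*x_demo)
#     freqs, spectrum = Fourier(x_demo, sig_demo)  # peak expected near 3.0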
def flat_data(data, dynlow, dynhigh, log):
    """Clip data to the dynamic range [dynlow, dynhigh].

    If log is True, the bounds are interpreted as powers of ten
    (10**dynlow, 10**dynhigh) and the clipped data is returned on a
    log10 scale.
    """
    if log:
        mi = 10**dynlow
        ma = 10**dynhigh
        data = np.minimum(np.maximum(data, mi), ma)
        data = np.log10(data)
    else:
        mi = dynlow
        ma = dynhigh
        data = np.minimum(np.maximum(data, mi), ma)
    return data
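# Example (illustrative): compress a detector image to four decades for
# display; 'raw_image' is a hypothetical array of counts.
#
#     img_log = flat_data(raw_image, 0, 4, log=True)  # values end up in [0, 4]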
def psdVoigt(parameters, x):
    """Pseudo-Voigt profile: a linear combination of a unit-area Lorentzian
    and a unit-area Gaussian sharing the same center xc, area A, and FWHM w."""
    y0 = parameters['y0'].value  # constant background
    xc = parameters['xc'].value  # peak center
    A = parameters['A'].value    # peak area
    w = parameters['w'].value    # full width at half maximum
    mu = parameters['mu'].value  # Lorentzian fraction: 0 = pure Gaussian, 1 = pure Lorentzian
    y = y0 + A * (mu * (2/np.pi) * (w / (4*(x - xc)**2 + w**2))
                  + (1 - mu) * (np.sqrt(4*np.log(2)) / (np.sqrt(np.pi) * w))
                  * np.exp(-(4*np.log(2)/w**2) * (x - xc)**2))
    return y
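# Minimal sketch (illustrative only): evaluating the profile on a grid.
# 'demo_pars' and 'grid' are hypothetical names, not used elsewhere.
#
#     demo_pars = Parameters()
#     for name, value in [('y0', 0.), ('xc', 1.5), ('A', 10.), ('w', 0.2), ('mu', 0.5)]:
#         demo_pars.add(name, value=value)
#     grid = np.linspace(0., 3., 301)
#     curve = psdVoigt(demo_pars, grid)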
def objective(pars, y, x):
    """Residuals between the data and the pseudo-Voigt model (minimized by lmfit)."""
    err = y - psdVoigt(pars, x)
    return err
def init(data_x, data_y, xc, arbitrary=False):
    """Build the initial Parameters [y0, xc, A, w, mu] for the pseudo-Voigt fit.

    xc is the position the user clicks on the image to draw the profiles.
    """
    param = Parameters()
    if arbitrary:
        A = data_y.max()
    else:
        # Use the data value at the clicked position as the initial amplitude.
        idA = np.where(data_x == xc)[0][0]
        A = data_y[idA]
    y0 = 1.0
    w = 0.5
    mu = 0.5
    param.add('y0', value=y0)
    param.add('xc', value=xc)
    param.add('A', value=A)
    param.add('w', value=w)
    param.add('mu', value=mu, min=0., max=1.)
    return param
def fit(data_x, data_y, xc, arbitrary=False):
    """Fit a pseudo-Voigt profile to (data_x, data_y) around xc.

    Returns the fitted parameters and the fitted curve, in that order.
    """
    param_init = init(data_x, data_y, xc, arbitrary)
    if data_x[0] > data_x[-1]:
        # Ensure x is in ascending order for the fit.
        data_x = data_x[::-1]
    # lmfit's minimize updates param_init in place (pre-0.9 lmfit behavior),
    # so the curve below is evaluated with the optimized values.
    result = minimize(objective, param_init, args=(data_y, data_x))
    x = np.linspace(data_x.min(), data_x.max(), data_x.shape[0])
    y = psdVoigt(param_init, x)
    return param_init, y
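# Hedged usage sketch: fitting a profile extracted from the map. 'prof_x',
# 'prof_y' and 'click_x' are hypothetical; click_x must be a value contained
# in prof_x when arbitrary=False (see init above).
#
#     pars, model_y = fit(prof_x, prof_y, click_x)
#     print pars['xc'].value, pars['xc'].stderr  # Python 2 print statement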
class PopUpFringes(object):
def __init__(self, xdata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self.popupwin) #the toolbar expects the parent window, not this helper object
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
xi = np.arange(len(self.xdata))
slope, intercept, r_value, p_value, std_err = stats.linregress(self.xdata,xi)
fitline = slope*self.xdata+intercept
self.ax.plot(self.xdata, fitline, 'r-',self.xdata,xi, 'bo')
self.ax.axis([self.xdata.min(),self.xdata.max(),xi.min()-1, xi.max()+1])
self.ax.text(0.3, 0.9,'Slope = %.4f +- %.4f' % (slope, std_err),
horizontalalignment='center',
verticalalignment='center',
transform = self.ax.transAxes,
color='red')
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
class PopUpImage(object):
def __init__(self, xdata, ydata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
self.ydata = ydata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self.popupwin) #the toolbar expects the parent window, not this helper object
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.canvas.mpl_connect("button_press_event",self.on_press)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
self.ax.plot(self.xdata, self.ydata, 'b-', lw=2)
self.textes = []
self.plots = []
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
def on_press(self, event):
if event.inaxes == self.ax and event.button==3:
self.clear_notes()
xc = event.xdata
#***** Find the closest x value *****
residuel = self.xdata - xc
residuel = np.abs(residuel)
j = np.argmin(residuel)
#y = self.ydata[i-1:i+1]
#yc= y.max()
#j = np.where(self.ydata == yc)
#j = j[0][0]
xc= self.xdata[j]
x_fit = self.xdata[j-3:j+3]
y_fit = self.ydata[j-3:j+3]
fitted_param, fitted_data = fit(x_fit, y_fit, xc, True)
x_fit = np.linspace(x_fit.min(), x_fit.max(), 200)
y_fit = psdVoigt(fitted_param, x_fit)
period = fitted_param['xc'].value
std_err= fitted_param['xc'].stderr
p = self.ax.plot(x_fit, y_fit,'r-')
p2 = self.ax.axvline(period,color='green',lw=2)
txt=self.ax.text(0.05, 0.9, 'Period = %.4f +- %.4f (nm)'%(period, std_err), transform = self.ax.transAxes, color='red')
self.textes.append(txt)
self.plots.append(p[0])
self.plots.append(p2)
elif event.inaxes == self.ax and event.button==2:
dif = np.diff(self.ydata)
dif = dif/dif.max()
p3=self.ax.plot(dif,'r-')
self.plots.append(p3[0])
self.canvas.draw()
def clear_notes(self):
if len(self.textes)>0:
for t in self.textes:
t.remove()
if len(self.plots)>0:
for p in self.plots:
p.remove()
self.textes = []
self.plots = []
class MyMainWindow(gtk.Window):
def __init__(self):
super(MyMainWindow, self).__init__()
self.set_title("MCA Reciprocal space map processing. Version %s - last update on: %s"%(__version__,__date__))
self.set_size_request(1200,900)
self.set_position(gtk.WIN_POS_CENTER)
self.set_border_width(10)
self.toolbar = gtk.Toolbar()
self.toolbar.set_style(gtk.TOOLBAR_ICONS)
self.refreshtb = gtk.ToolButton(gtk.STOCK_REFRESH)
self.opentb = gtk.ToolButton(gtk.STOCK_OPEN)
self.sep = gtk.SeparatorToolItem()
self.aspecttb = gtk.ToolButton(gtk.STOCK_PAGE_SETUP)
self.quittb = gtk.ToolButton(gtk.STOCK_QUIT)
self.toolbar.insert(self.opentb, 0)
self.toolbar.insert(self.refreshtb, 1)
self.toolbar.insert(self.aspecttb, 2)
self.toolbar.insert(self.sep, 3)
self.toolbar.insert(self.quittb, 4)
self.tooltips = gtk.Tooltips()
self.tooltips.set_tip(self.refreshtb,"Reload data files")
self.tooltips.set_tip(self.opentb,"Open a folder containing HDF5 (*.h5) data files")
self.tooltips.set_tip(self.aspecttb,"Change the graph's aspect ratio")
self.tooltips.set_tip(self.quittb,"Quit the program")
self.opentb.connect("clicked", self.choose_folder)
self.refreshtb.connect("clicked",self.folder_update)
self.aspecttb.connect("clicked",self.change_aspect_ratio)
self.quittb.connect("clicked", gtk.main_quit)
self.graph_aspect = False #Flag to change the aspect ratio of the graph, False = Auto, True = equal
############################# BOXES ###############################################
vbox = gtk.VBox()
vbox.pack_start(self.toolbar,False,False,0)
hbox=gtk.HBox()
######################### TREE VIEW #############################################
self.sw = gtk.ScrolledWindow()
self.sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
hbox.pack_start(self.sw, False, False, 0)
self.store=[]
self.list_store = gtk.ListStore(str)
self.treeView = gtk.TreeView(self.list_store)
self.treeView.connect("row-activated",self.on_changed_rsm)
rendererText = gtk.CellRendererText()
self.TVcolumn = gtk.TreeViewColumn("RSM data files", rendererText, text=0)
self.TVcolumn.set_sort_column_id(0)
self.treeView.append_column(self.TVcolumn)
self.sw.add(self.treeView)
self.GUI_current_folder = self.DATA_current_folder = os.getcwd()
#******************************************************************
# Notebooks
#******************************************************************
self.notebook = gtk.Notebook()
self.page_GUI = gtk.HBox()
self.page_conversion = gtk.VBox()
self.page_XRDML = gtk.VBox()
######################################FIGURES####################33
#self.page_single_figure = gtk.HBox()
self.midle_panel = gtk.VBox()
self.rsm = ""
self.rsm_choosen = ""
self.my_notes = []
self.lines = []
self.points=[]
self.polygons=[]
self.fig=Figure(dpi=100)
## Draw line for arbitrary profiles
self.arb_lines_X = []
self.arb_lines_Y = []
self.arb_line_points = 0
#self.ax = self.fig.add_subplot(111)
self.ax = self.fig.add_axes([0.1,0.2,0.7,0.7])
self.fig.subplots_adjust(left=0.1,bottom=0.20, top=0.90)
self.vmin = 0
self.vmax = 1000
self.vmax_range = self.vmax
self.canvas = FigureCanvas(self.fig)
Fig_hbox = gtk.HBox()
self.Export_HQ_Image_btn = gtk.Button("Save HQ image")
self.Export_HQ_Image_btn.connect("clicked", self.Export_HQ_Image)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#Global color bar
self.cax = self.fig.add_axes([0.85, 0.20, 0.03, 0.70])#left,bottom,width,height
#self.canvas.mpl_connect("motion_notify_event",self.on_motion)
self.canvas.mpl_connect("button_press_event",self.on_press)
#self.canvas.mpl_connect("button_release_event",self.on_release)
self.mouse_moved = False #If clicked without moving: do not zoom the image
Fig_hbox.pack_start(self.Export_HQ_Image_btn, False, False, 0)
Fig_hbox.pack_start(self.main_figure_navBar, True,True, 0)
self.midle_panel.pack_start(Fig_hbox, False,False, 0)
self.midle_panel.pack_start(self.canvas, True,True, 2)
self.page_GUI.pack_start(self.midle_panel, True,True, 0)
#hbox.pack_start(self.midle_panel, True,True, 0)
########################################## RIGHT PANEL ###################
self.right_panel = gtk.VBox(False,0)
self.linear_scale_btn = gtk.ToggleButton("Linear scale")
self.linear_scale_btn.set_usize(30,0)
self.linear_scale_btn.connect("toggled",self.log_update)
self.log_scale=0
#self.wavelength_txt = gtk.Label("Energy (eV)")
##self.wavelength_txt.set_alignment(1,0.5)
#self.wavelength_field = gtk.Entry()
#self.wavelength_field.set_text("8333")
#self.wavelength_field.set_usize(30,0)
#self.lattice_const_txt = gtk.Label("Lattice constant (nm)")
#self.lattice_const_txt.set_alignment(1,0.5)
#self.lattice_const = gtk.Entry()
#self.lattice_const.set_text("0.5431")
#self.lattice_const.set_usize(30,0)
self.int_range_txt = gtk.Label("Integration range")
self.int_range_txt.set_alignment(1,0.5)
self.int_range = gtk.Entry()
self.int_range.set_text("0.05")
self.int_range.set_usize(30,0)
self.fitting_range_txt = gtk.Label("Fitting range")
self.fitting_range_txt.set_alignment(1,0.5)
self.fitting_range = gtk.Entry()
self.fitting_range.set_text("0.1")
self.fitting_range.set_usize(30,0)
# ********** Set the default values for configuration *************
self.plotXYprofiles_btn = gtk.RadioButton(None,"Plot X,Y profiles")
self.plotXYprofiles_btn.set_active(False)
self.arbitrary_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"Arbitrary profiles")
self.rectangle_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"ROI projection")
self.option_table = gtk.Table(4,3,False)#Pack the options
self.option_table.attach(self.linear_scale_btn, 0,1,0,1)
self.option_table.attach(self.plotXYprofiles_btn,0,1,1,2)
self.option_table.attach(self.arbitrary_profiles_btn,0,1,2,3)
self.option_table.attach(self.rectangle_profiles_btn,0,1,3,4)
# self.option_table.attach(self.wavelength_txt,1,2,0,1)
# self.option_table.attach(self.wavelength_field,2,3,0,1)
# self.option_table.attach(self.lattice_const_txt,1,2,1,2)
# self.option_table.attach(self.lattice_const, 2,3,1,2)
self.option_table.attach(self.int_range_txt, 1,2,0,1)
self.option_table.attach(self.int_range, 2,3,0,1)
self.option_table.attach(self.fitting_range_txt, 1,2,1,2)
self.option_table.attach(self.fitting_range, 2,3,1,2)
### Options for profile plots
self.profiles_log_btn = gtk.ToggleButton("Y-Log")
self.profiles_log_btn.connect("toggled",self.profiles_update)
self.profiles_export_data_btn = gtk.Button("Export data")
self.profiles_export_data_btn.connect("clicked",self.profiles_export)
self.profiles_option_box = gtk.HBox(False,0)
self.profiles_option_box.pack_start(self.profiles_log_btn, False, False, 0)
self.profiles_option_box.pack_start(self.profiles_export_data_btn, False, False, 0)
### Figure of profiles plot
self.profiles_fringes = []
self.fig_profiles = Figure()
self.profiles_ax1 = self.fig_profiles.add_subplot(211)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2 = self.fig_profiles.add_subplot(212)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas = FigureCanvas(self.fig_profiles)
self.profiles_canvas.set_size_request(450,50)
self.profiles_canvas.mpl_connect("button_press_event",self.profile_press)
self.profiles_navBar = NavigationToolbar(self.profiles_canvas, self)
self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
#### Results of fitted curves
self.fit_results_table = gtk.Table(7,3, False)
title = gtk.Label("Fitted results:")
self.chi_title = gtk.Label("Qz profile")
self.tth_title = gtk.Label("Qx profile")
y0 = gtk.Label("y0:")
xc = gtk.Label("xc:")
A = gtk.Label("A:")
w = gtk.Label("FWHM:")
mu = gtk.Label("mu:")
y0.set_alignment(0,0.5)
xc.set_alignment(0,0.5)
A.set_alignment(0,0.5)
w.set_alignment(0,0.5)
mu.set_alignment(0,0.5)
self.Qz_fitted_y0 = gtk.Label()
self.Qz_fitted_xc = gtk.Label()
self.Qz_fitted_A = gtk.Label()
self.Qz_fitted_w = gtk.Label()
self.Qz_fitted_mu = gtk.Label()
self.Qx_fitted_y0 = gtk.Label()
self.Qx_fitted_xc = gtk.Label()
self.Qx_fitted_A = gtk.Label()
self.Qx_fitted_w = gtk.Label()
self.Qx_fitted_mu = gtk.Label()
self.fit_results_table.attach(title,0,3,0,1)
self.fit_results_table.attach(self.chi_title,1,2,1,2)
self.fit_results_table.attach(self.tth_title,2,3,1,2)
self.fit_results_table.attach(y0,0,1,2,3)
self.fit_results_table.attach(xc,0,1,3,4)
self.fit_results_table.attach(A,0,1,4,5)
self.fit_results_table.attach(w,0,1,5,6)
self.fit_results_table.attach(mu,0,1,6,7)
self.fit_results_table.attach(self.Qz_fitted_y0,1,2,2,3)
self.fit_results_table.attach(self.Qz_fitted_xc,1,2,3,4)
self.fit_results_table.attach(self.Qz_fitted_A,1,2,4,5)
self.fit_results_table.attach(self.Qz_fitted_w,1,2,5,6)
self.fit_results_table.attach(self.Qz_fitted_mu,1,2,6,7)
self.fit_results_table.attach(self.Qx_fitted_y0,2,3,2,3)
self.fit_results_table.attach(self.Qx_fitted_xc,2,3,3,4)
self.fit_results_table.attach(self.Qx_fitted_A,2,3,4,5)
self.fit_results_table.attach(self.Qx_fitted_w,2,3,5,6)
self.fit_results_table.attach(self.Qx_fitted_mu,2,3,6,7)
#### PACK the right panel
self.right_panel.pack_start(self.option_table, False, False, 0)
self.right_panel.pack_start(self.profiles_option_box,False,False,0)
self.right_panel.pack_start(self.profiles_navBar,False,False,0)
self.right_panel.pack_start(self.profiles_canvas,True,True,0)
self.right_panel.pack_start(self.fit_results_table, False, False, 0)
self.page_GUI.pack_end(self.right_panel,False, False,5)
#********************************************************************
# Conversion data SPEC to HDF page
#********************************************************************
self.conv_box = gtk.VBox()
self.box1 = gtk.HBox()
self.det_frame = gtk.Frame()
self.det_frame.set_label("Detector Vantec")
self.det_frame.set_label_align(0.5,0.5)
self.exp_frame = gtk.Frame()
self.exp_frame.set_label("Experiment parameters")
self.exp_frame.set_label_align(0.5,0.5)
self.conv_frame = gtk.Frame()
self.conv_frame.set_label("Data conversion: SPEC-HDF5")
self.conv_frame.set_label_align(0.5,0.5)
#self.conv_frame.set_alignment(0.5,0.5)
#********************************************************************
# Detector parameters
#********************************************************************
self.det_table = gtk.Table(6,2,False)
self.t1 = gtk.Label("Detector size (mm)")
self.t2 = gtk.Label("Number of channels")
self.t3 = gtk.Label("Center channel")
self.t4 = gtk.Label("Channels/Degree")
self.t5 = gtk.Label("ROI (from-to)")
self.t6 = gtk.Label("Orientation")
self.t1.set_alignment(0,0.5)
self.t2.set_alignment(0,0.5)
self.t3.set_alignment(0,0.5)
self.t4.set_alignment(0,0.5)
self.t5.set_alignment(0,0.5)
self.t6.set_alignment(0,0.5)
self.t1_entry = gtk.Entry()
self.t1_entry.set_text("50")
self.t2_entry = gtk.Entry()
self.t2_entry.set_text("2048")
self.t3_entry = gtk.Entry()
self.t3_entry.set_text("819.87")
self.t4_entry = gtk.Entry()
self.t4_entry.set_text("211.012")
self.small_box = gtk.HBox()
self.t5_label = gtk.Label("-")
self.t5_entry1 = gtk.Entry()
self.t5_entry1.set_text("40")
self.t5_entry2 = gtk.Entry()
self.t5_entry2.set_text("1300")
self.small_box.pack_start(self.t5_entry1,True, True,0)
self.small_box.pack_start(self.t5_label,True, True,0)
self.small_box.pack_start(self.t5_entry2,True, True,0)
self.t6_entry = gtk.combo_box_new_text()
self.t6_entry.append_text("Up (zero on the bottom)")
self.t6_entry.append_text("Down (zero on the top)")
self.t6_entry.set_active(1)
self.det_table.attach(self.t1, 0,1,0,1)
self.det_table.attach(self.t2, 0,1,1,2)
self.det_table.attach(self.t3, 0,1,2,3)
self.det_table.attach(self.t4, 0,1,3,4)
self.det_table.attach(self.t5, 0,1,4,5)
self.det_table.attach(self.t6, 0,1,5,6)
self.det_table.attach(self.t1_entry, 1,2,0,1)
self.det_table.attach(self.t2_entry, 1,2,1,2)
self.det_table.attach(self.t3_entry, 1,2,2,3)
self.det_table.attach(self.t4_entry, 1,2,3,4)
self.det_table.attach(self.small_box, 1,2,4,5)
self.det_table.attach(self.t6_entry, 1,2,5,6)
self.det_table_align = gtk.Alignment()
self.det_table_align.set_padding(15,10,10,10)
self.det_table_align.set(0.5, 0.5, 1.0, 1.0)
self.det_table_align.add(self.det_table)
self.det_frame.add(self.det_table_align)
#********************************************************************
# Experiment parameters
#********************************************************************
self.exp_table = gtk.Table(6,2,False)
self.e1 = gtk.Label("Substrate material:")
self.e1_other = gtk.Label("If other:")
self.e2 = gtk.Label("Energy (eV)")
self.e3 = gtk.Label("Attenuation coefficient file")
self.e4 = gtk.Label("Foil colunm name (in SPEC file)")
self.e5 = gtk.Label("Monitor colunm name (in SPEC file)")
self.e6 = gtk.Label("Reference monitor (for normalization)")
self.e1.set_alignment(0,0.5)
self.e1_other.set_alignment(1,0.5)
self.e2.set_alignment(0,0.5)
self.e3.set_alignment(0,0.5)
self.e4.set_alignment(0,0.5)
self.e5.set_alignment(0,0.5)
self.e6.set_alignment(0,0.5)
#self.e1_entry = gtk.Label("Si for now")
self.e1_entry = gtk.combo_box_new_text()
self.e1_entry.append_text("-- other")
self.e1_entry.append_text("Si")
self.e1_entry.append_text("Ge")
self.e1_entry.append_text("GaAs")
self.e1_entry.append_text("GaP")
self.e1_entry.append_text("GaSb")
self.e1_entry.append_text("InAs")
self.e1_entry.append_text("InP")
self.e1_entry.append_text("InSb")
self.e1_entry.set_active(1)
self.e1_entry_other = gtk.Entry()
self.e1_entry_other.set_text("")
self.e2_entry = gtk.Entry()
self.e2_entry.set_text("8333")
self.e3_box = gtk.HBox()
self.e3_path =gtk.Entry()
self.e3_browse = gtk.Button("Browse")
self.e3_browse.connect("clicked", self.select_file, self.e3_path, "A")
self.e3_box.pack_start(self.e3_path, False, False, 0)
self.e3_box.pack_start(self.e3_browse, False, False, 0)
self.e4_entry = gtk.Entry()
self.e4_entry.set_text("pfoil")
self.e5_entry = gtk.Entry()
self.e5_entry.set_text("vct3")
self.e6_entry = gtk.Entry()
self.e6_entry.set_text("1e6")
substrate_box1 = gtk.HBox()
substrate_box2 = gtk.HBox()
substrate_box1.pack_start(self.e1, False, False, 0)
substrate_box1.pack_start(self.e1_entry, False, False, 0)
substrate_box2.pack_start(self.e1_other, False, False, 0)
substrate_box2.pack_start(self.e1_entry_other, False, False, 0)
self.exp_table.attach(substrate_box1, 0,1,0,1)
self.exp_table.attach(self.e2, 0,1,1,2)
self.exp_table.attach(self.e3, 0,1,2,3)
self.exp_table.attach(self.e4, 0,1,3,4)
self.exp_table.attach(self.e5, 0,1,4,5)
self.exp_table.attach(self.e6, 0,1,5,6)
self.exp_table.attach(substrate_box2, 1,2,0,1)
self.exp_table.attach(self.e2_entry, 1,2,1,2)
self.exp_table.attach(self.e3_box, 1,2,2,3)
self.exp_table.attach(self.e4_entry, 1,2,3,4)
self.exp_table.attach(self.e5_entry, 1,2,4,5)
self.exp_table.attach(self.e6_entry, 1,2,5,6)
self.exp_table_align = gtk.Alignment()
self.exp_table_align.set_padding(15,10,10,10)
self.exp_table_align.set(0.5, 0.5, 1.0, 1.0)
self.exp_table_align.add(self.exp_table)
self.exp_frame.add(self.exp_table_align)
#********************************************************************
# Data conversion information
#********************************************************************
self.conv_table = gtk.Table(6,3,False)
self.c1 = gtk.Label("Spec file")
self.c2 = gtk.Label("MCA file")
self.c3 = gtk.Label("Destination folder")
self.c4 = gtk.Label("Scan number (from-to)")
self.c5 = gtk.Label("Description for each RSM (optional-separate by comma)")
self.c6 = gtk.Label("Problem of foil delay (foil[n]-->data[n+1])")
self.c1.set_alignment(0,0.5)
self.c2.set_alignment(0,0.5)
self.c3.set_alignment(0,0.5)
self.c4.set_alignment(0,0.5)
self.c5.set_alignment(0,0.5)
self.c6.set_alignment(0,0.5)
self.c1_entry1 = gtk.Entry()
self.c2_entry1 = gtk.Entry()
self.c3_entry1 = gtk.Entry()
self.c4_entry1 = gtk.Entry()
self.c5_entry1 = gtk.Entry()
self.c5_entry1.set_text("")
self.c6_entry = gtk.CheckButton()
self.c1_entry2 = gtk.Button("Browse SPEC")
self.c2_entry2 = gtk.Button("Browse MCA")
self.c3_entry2 = gtk.Button("Browse Folder")
self.c4_entry2 = gtk.Entry()
self.c1_entry2.connect("clicked", self.select_file, self.c1_entry1, "S")
self.c2_entry2.connect("clicked", self.select_file, self.c2_entry1, "M")
self.c3_entry2.connect("clicked", self.select_folder, self.c3_entry1, "D")
self.conv_table.attach(self.c1, 0,1,0,1)
self.conv_table.attach(self.c2, 0,1,1,2)
self.conv_table.attach(self.c3, 0,1,2,3)
self.conv_table.attach(self.c4, 0,1,3,4)
self.conv_table.attach(self.c5, 0,1,4,5)
self.conv_table.attach(self.c6, 0,1,5,6)
self.conv_table.attach(self.c1_entry1, 1,2,0,1)
self.conv_table.attach(self.c2_entry1, 1,2,1,2)
self.conv_table.attach(self.c3_entry1, 1,2,2,3)
self.conv_table.attach(self.c4_entry1, 1,2,3,4)
self.conv_table.attach(self.c5_entry1, 1,3,4,5)
self.conv_table.attach(self.c6_entry, 1,2,5,6)
self.conv_table.attach(self.c1_entry2, 2,3,0,1)
self.conv_table.attach(self.c2_entry2, 2,3,1,2)
self.conv_table.attach(self.c3_entry2, 2,3,2,3)
self.conv_table.attach(self.c4_entry2, 2,3,3,4)
self.conv_table_align = gtk.Alignment()
self.conv_table_align.set_padding(15,10,10,10)
self.conv_table_align.set(0.5, 0.5, 1.0, 1.0)
self.conv_table_align.add(self.conv_table)
self.conv_frame.add(self.conv_table_align)
#********************************************************************
# The RUN button
#********************************************************************
self.run_conversion = gtk.Button("Execute")
self.run_conversion.connect("clicked", self.spec2HDF)
self.run_conversion.set_size_request(50,30)
self.show_info = gtk.Label()
#********************************************************************
# Pack the frames
#********************************************************************
self.box1.pack_start(self.det_frame,padding=15)
self.box1.pack_end(self.exp_frame, padding =15)
self.conv_box.pack_start(self.box1,padding=15)
self.conv_box.pack_start(self.conv_frame,padding=5)
self.conv_box.pack_start(self.run_conversion, False,False,10)
self.conv_box.pack_start(self.show_info, False,False,10)
self.page_conversion.pack_start(self.conv_box,False, False,20)
#********************************************************************
# Conversion XRDML data to HDF
#********************************************************************
self.XRDML_conv_box = gtk.VBox()
self.Instrument_table = gtk.Table(1,4,True)
self.Inst_txt = gtk.Label("Instrument:")
self.Inst_txt.set_alignment(0,0.5)
self.Instrument = gtk.combo_box_new_text()
self.Instrument.append_text("Bruker")
self.Instrument.append_text("PANalytical")
self.Instrument.set_active(0)
self.Instrument_table.attach(self.Inst_txt,0,1,0,1)
self.Instrument_table.attach(self.Instrument, 1,2,0,1)
self.Instrument.connect("changed",self.Change_Lab_Instrument)
self.choosen_instrument = self.Instrument.get_active_text()
self.XRDML_table = gtk.Table(7,4,True)
self.XRDML_tooltip = gtk.Tooltips()
self.XRDML_substrate_txt = gtk.Label("Substrate material:")
self.XRDML_substrate_other_txt = gtk.Label("If other:")
self.XRDML_substrate_inplane_txt= gtk.Label("In-plane direction (e.g. 1 1 0) - optional")
self.XRDML_substrate_outplane_txt= gtk.Label("Out-of-plane direction (e.g. 0 0 1) - optional")
self.XRDML_reflection_txt = gtk.Label("Reflection (H K L) - optional:")
self.XRDML_energy_txt = gtk.Label("Energy (eV) - optional:")
self.XRDML_description_txt = gtk.Label("Description of the sample:")
self.XRDML_xrdml_file_txt = gtk.Label("Select RAW file:")
self.XRDML_destination_txt = gtk.Label("Select a destination folder:")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_txt, "Substrate material")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_other_txt, "The substrate material, e.g. Al, SiO2, CdTe, GaN,...")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_inplane_txt, "The substrate in-plane and out-of-plane directions - for calculation of the orientation matrix.")
self.XRDML_tooltip.set_tip(self.XRDML_reflection_txt, "H K L, separated by spaces, e.g. 2 2 4 (0 0 0 for an XRR map). This is used for offset correction.")
self.XRDML_tooltip.set_tip(self.XRDML_energy_txt, "If empty, the default Cu K_alpha_1 will be used.")
self.XRDML_tooltip.set_tip(self.XRDML_description_txt, "Description of the sample; this will be the name of the converted file. If empty, it will be named 'XRDML_Map.h5'")
self.XRDML_tooltip.set_tip(self.XRDML_xrdml_file_txt, "Select the data file recorded by the chosen equipment")
self.XRDML_tooltip.set_tip(self.XRDML_destination_txt, "Select a destination folder to store the converted file.")
self.XRDML_substrate_txt.set_alignment(0,0.5)
self.XRDML_substrate_other_txt.set_alignment(1,0.5)
self.XRDML_substrate_inplane_txt.set_alignment(0,0.5)
self.XRDML_substrate_outplane_txt.set_alignment(1,0.5)
self.XRDML_reflection_txt.set_alignment(0,0.5)
self.XRDML_energy_txt.set_alignment(0,0.5)
self.XRDML_description_txt.set_alignment(0,0.5)
self.XRDML_xrdml_file_txt.set_alignment(0,0.5)
self.XRDML_destination_txt.set_alignment(0,0.5)
self.XRDML_substrate = gtk.combo_box_new_text()
self.XRDML_substrate.append_text("-- other")
self.XRDML_substrate.append_text("Si")
self.XRDML_substrate.append_text("Ge")
self.XRDML_substrate.append_text("GaAs")
self.XRDML_substrate.append_text("GaN")
self.XRDML_substrate.append_text("GaP")
self.XRDML_substrate.append_text("GaSb")
self.XRDML_substrate.append_text("InAs")
self.XRDML_substrate.append_text("InP")
self.XRDML_substrate.append_text("InSb")
self.XRDML_substrate.append_text("Al2O3")
self.XRDML_substrate.set_active(0)
self.XRDML_substrate_other = gtk.Entry()
self.XRDML_substrate_other.set_text("")
self.XRDML_substrate_inplane = gtk.Entry()
self.XRDML_substrate_inplane.set_text("")
self.XRDML_substrate_outplane = gtk.Entry()
self.XRDML_substrate_outplane.set_text("")
self.XRDML_reflection = gtk.Entry()
self.XRDML_reflection.set_text("")
self.XRDML_energy = gtk.Entry()
self.XRDML_energy.set_text("")
self.XRDML_description = gtk.Entry()
self.XRDML_description.set_text("")
self.XRDML_xrdml_file_path = gtk.Entry()
self.XRDML_destination_path = gtk.Entry()
self.XRDML_xrdml_file_browse = gtk.Button("Browse RAW file")
self.XRDML_destination_browse= gtk.Button("Browse destination folder")
self.XRDML_xrdml_file_browse.connect("clicked", self.select_file, self.XRDML_xrdml_file_path, "S")
self.XRDML_destination_browse.connect("clicked", self.select_folder, self.XRDML_destination_path, "D")
self.XRDML_table.attach(self.XRDML_substrate_txt, 0,1,0,1)
self.XRDML_table.attach(self.XRDML_substrate, 1,2,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other_txt, 2,3,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other, 3,4,0,1)
self.XRDML_table.attach(self.XRDML_substrate_inplane_txt, 0,1,1,2)
self.XRDML_table.attach(self.XRDML_substrate_inplane, 1,2,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane_txt, 2,3,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane, 3,4,1,2)
self.XRDML_table.attach(self.XRDML_reflection_txt, 0,1,2,3)
self.XRDML_table.attach(self.XRDML_reflection, 1,2,2,3)
self.XRDML_table.attach(self.XRDML_energy_txt,0,1,3,4)
self.XRDML_table.attach(self.XRDML_energy, 1,2,3,4)
self.XRDML_table.attach(self.XRDML_description_txt, 0,1,4,5)
self.XRDML_table.attach(self.XRDML_description, 1,2,4,5)
self.XRDML_table.attach(self.XRDML_xrdml_file_txt, 0,1,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_path, 1,2,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_browse, 2,3,5,6)
self.XRDML_table.attach(self.XRDML_destination_txt, 0,1,6,7)
self.XRDML_table.attach(self.XRDML_destination_path, 1,2,6,7)
self.XRDML_table.attach(self.XRDML_destination_browse, 2,3,6,7)
#********************************************************************
# The RUN button
#********************************************************************
self.XRDML_run = gtk.Button("Execute")
self.XRDML_run.connect("clicked", self.Convert_Lab_Source)
self.XRDML_run.set_size_request(50,30)
self.XRDML_show_info = gtk.Label()
#********************************************************************
# Pack the XRDML options
#********************************************************************
self.XRDML_conv_box.pack_start(self.Instrument_table, False, False,5)
self.XRDML_conv_box.pack_start(self.XRDML_table, False, False, 10)
self.XRDML_conv_box.pack_start(self.XRDML_run, False, False, 5)
self.XRDML_conv_box.pack_start(self.XRDML_show_info, False,False,10)
self.page_XRDML.pack_start(self.XRDML_conv_box,False, False,20)
#********************************************************************
# Pack the notebook
#********************************************************************
self.notebook.append_page(self.page_GUI, gtk.Label("RSM GUI"))
self.notebook.append_page(self.page_conversion, gtk.Label("ESRF-MCA spec file (Vantec)"))
self.notebook.append_page(self.page_XRDML, gtk.Label("Lab instruments"))
hbox.pack_start(self.notebook)
vbox.pack_start(hbox,True,True,0)
############################### Sliders ######################################
#sld_box = gtk.Fixed()
sld_box = gtk.HBox(False,2)
self.vmin_txt = gtk.Label("Vmin")
self.vmin_txt.set_alignment(0,0.5)
#self.vmin_txt.set_justify(gtk.JUSTIFY_CENTER)
self.vmax_txt = gtk.Label("Vmax")
self.vmax_txt.set_alignment(0,0.5)
#self.vmax_txt.set_justify(gtk.JUSTIFY_CENTER)
self.sld_vmin = gtk.HScale()
self.sld_vmax = gtk.HScale()
self.sld_vmin.set_size_request(200,25)
self.sld_vmax.set_size_request(200,25)
self.sld_vmin.set_range(0,self.vmax)
self.sld_vmax.set_range(0,self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(0)
self.sld_vmin.connect('value-changed',self.scale_update)
self.sld_vmax.connect('value-changed',self.scale_update)
vmax_spin_adj = gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmax_spin_btn = gtk.SpinButton(vmax_spin_adj,1,1)
self.vmax_spin_btn.set_numeric(True)
self.vmax_spin_btn.set_wrap(True)
self.vmax_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmax_spin_btn.connect('value-changed',self.scale_update_spin)
vmin_spin_adj = gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmin_spin_btn = gtk.SpinButton(vmin_spin_adj,1,1)
self.vmin_spin_btn.set_numeric(True)
self.vmin_spin_btn.set_wrap(True)
self.vmin_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmin_spin_btn.connect('value-changed',self.scale_update_spin)
sld_box.pack_start(self.vmin_txt,False,False,0)
sld_box.pack_start(self.sld_vmin,False,False,0)
sld_box.pack_start(self.vmin_spin_btn,False,False,0)
sld_box.pack_start(self.vmax_txt,False,False,0)
sld_box.pack_start(self.sld_vmax,False,False,0)
sld_box.pack_start(self.vmax_spin_btn,False,False,0)
#sld_box.pack_start(self.slider_reset_btn,False,False,0)
vbox.pack_start(sld_box,False,False,3)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
#########################################################################################################################
def format_coord(self, x, y):
#***** Add intensity information into the navigation toolbar *******************************
numrows, numcols = (self.gridder.data.T).shape
col,row = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = self.gridder.data.T[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
def pro_format_coord(self,x,y):
return 'x=%.4f, y=%.1f'%(x,y)
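# init_image redraws the color map from the current gridder. With log=True the
# data are shown as log10 and vmin/vmax are interpreted on that log scale;
# values below vmin are made transparent via cmap.set_under(alpha=0).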
def init_image(self,log=False):
self.ax.cla()
self.cax.cla()
#print "Initialize image ..."
#
#self.clevels = np.linspace(self.vmin, self.vmax, 100)
if log:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T),vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T), self.clevels, vmin=self.vmin, vmax=self.vmax)
else:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T,vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T, self.clevels, vmin=self.vmin, vmax=self.vmax)
self.img.cmap.set_under(alpha=0)
self.ax.axis([self.gridder.xaxis.min(), self.gridder.xaxis.max(), self.gridder.yaxis.min(), self.gridder.yaxis.max()])
#self.ax.set_aspect('equal')
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
self.ax.yaxis.label.set_size(20)
self.ax.xaxis.label.set_size(20)
self.ax.set_title(self.rsm_description,fontsize=20)
self.ax.format_coord = self.format_coord
self.cb = self.fig.colorbar(self.img, cax = self.cax, format="%.1f")#format=fm
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Intensity)\ [arb.\ units]$',fontsize=20)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=20)
self.cb.locator = MaxNLocator(nbins=6)
#self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#print "Image is initialized."
def change_aspect_ratio(self,w):
self.graph_aspect = not (self.graph_aspect)
if self.graph_aspect == True:
self.ax.set_aspect('equal')
else:
self.ax.set_aspect('auto')
self.canvas.draw()
def on_changed_rsm(self,widget,row,col):
#print "************Change RSM*************"
gc.collect() #Clear unused variables to gain memory
#************** Remind the structure of these HDF5 files:
# ************* file=[scan_id={'eta'=[data], '2theta'=[data], 'intensity'=[data], 'description'='RSM 004 ...'}]
self.clear_notes()
#self.init_image()
model = widget.get_model()
self.rsm_choosen = model[row][0]
self.rsm = join(self.GUI_current_folder,self.rsm_choosen)#file path
self.rsm_info = h5.File(self.rsm,'r')#HDF5 object that collects all information of this scan
#self.ax.set_title(self.rsm_choosen,fontsize=20)
### Data Loading ##
groups = self.rsm_info.keys()
scan = groups[0]
self.scan = self.rsm_info[scan]
self.data = self.scan.get('intensity').value
self.Qx = self.scan.get('Qx').value
self.Qy = self.scan.get('Qy').value
self.Qz = self.scan.get('Qz').value
self.rsm_description = self.scan.get('description').value
self.rsm_info.close()
#print "Data are successfully loaded."
self.gridder = xu.Gridder2D(self.data.shape[0],self.data.shape[1])
#print "Gridder is calculated."
# MM = self.data.max()
# M = np.log10(MM)
# data = flat_data(self.data,0,M)
self.gridder(self.Qx, self.Qz, self.data)
self.data = self.gridder.data.T
self.vmin=self.data.min()
self.vmax=self.data.max()
#print "Starting scale_plot()"
self.scale_plot()
#self.slider_update()
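# Note on the scale handshake below: the vmin/vmax sliders always hold values
# in the *currently displayed* scale. When the user toggles between log and
# linear, scale_plot converts the previous slider values (log10 one way,
# 10**x the other) before redrawing, and self.log_scale records which scale
# the sliders are in.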
def scale_plot(self):
#print "Scale_plot() is called."
data = self.data.copy()
#self.init_image()
if self.linear_scale_btn.get_active():
self.linear_scale_btn.set_label("--> Linear scale")
data = np.log10(data)
#print data.max()
self.init_image(log=True)
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
self.vmax = np.log10(actual_vmax) if self.log_scale == 0 else actual_vmax
if actual_vmin == 0:
self.vmin=0
elif actual_vmin >0:
self.vmin = np.log10(actual_vmin) if self.log_scale == 0 else actual_vmin
self.vmax_range = data.max()
self.log_scale = 1
#log=True
else:
self.linear_scale_btn.set_label("--> Log scale")
self.init_image(log=False)
#print "Calculating min max and update slider..."
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
#print "Actual vmax: ",actual_vmax
if self.log_scale == 1:
self.vmax = np.power(10.,actual_vmax)
else:
self.vmax = actual_vmax
self.vmax_range = data.max()
if actual_vmin ==0:
self.vmin = 0
elif actual_vmin>0:
if self.log_scale == 0:
self.vmin = actual_vmin
elif self.log_scale == 1:
self.vmin = np.power(10,actual_vmin)
self.log_scale = 0
#log=False
#print "Min max are calculated."
self.sld_vmax.set_range(-6,self.vmax_range)
self.sld_vmin.set_range(-6,self.vmax_range)
#self.init_image(log)
self.slider_update()
def log_update(self,widget):
self.scale_plot()
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Counts\ per\ second)\ [arb.\ units]$',fontsize=18)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=18)
#self.slider_update()
def scale_update(self,widget):
#print "Scale_update() is called."
self.vmin = self.sld_vmin.get_value()
self.vmax = self.sld_vmax.get_value()
self.vmin_spin_btn.set_value(self.vmin)
self.vmax_spin_btn.set_value(self.vmax)
self.slider_update()
def scale_update_spin(self,widget):
#print "Spin_update() is called"
self.vmin = self.vmin_spin_btn.get_value()
self.vmax = self.vmax_spin_btn.get_value()
self.slider_update()
def slider_update(self):
#print "slider_update() is called"
#self.img.set_clim(self.vmin, self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(self.vmin)
if self.linear_scale_btn.get_active():
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.1, 1.0, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.1, 1.0, 0))
else:
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 10, 100, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 10, 100, 0))
#self.vmax_spin_btn.update()
self.img.set_clim(self.vmin, self.vmax)
self.ax.relim()
self.canvas.draw()
#print "slider_update() stoped."
def choose_folder(self, w):
dialog = gtk.FileChooserDialog(title="Select a data folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.GUI_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
folder = folder.decode('utf8')
folder_basename = folder.split("/")[-1]
#print folder_basename
self.store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))] #parenthesized: 'and' binds tighter than 'or'
self.GUI_current_folder = folder
#print store
if len(self.store)>0:
self.list_store.clear()
for i in self.store:
self.list_store.append([i])
self.TVcolumn.set_title(folder_basename)
else:
pass
else:
pass
dialog.destroy()
def folder_update(self, w):
folder = self.GUI_current_folder
if folder != os.getcwd(): #compare values, not identity
store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.store=[]
self.list_store.clear()
for i in store:
self.list_store.append([i])
self.store.append(i)
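# arbitrary_line_cut extracts an interpolated cut between the two clicked
# points: Q coordinates are converted to pixel indices, a straight pixel path
# is sampled with scipy.ndimage.map_coordinates, and the single intensity
# profile is returned twice so it can be plotted against both its Qx and its
# Qz coordinates.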
def arbitrary_line_cut(self, x, y):
#**** num: integer - number of points to be extracted
#**** convert Q coordinates to pixel coordinates
x0, y0 = xu.analysis.line_cuts.getindex(x[0], y[0], self.gridder.xaxis, self.gridder.yaxis)
x1, y1 = xu.analysis.line_cuts.getindex(x[1], y[1], self.gridder.xaxis, self.gridder.yaxis)
num = int(np.hypot(x1-x0, y1-y0)) #number of points that will be plotted
xi, yi = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
profiles_data_X = profiles_data_Y = scipy.ndimage.map_coordinates(self.gridder.data, np.vstack((xi,yi))) #one interpolated cut, plotted against both Qx and Qz coordinates
coor_X_export,coor_Y_export = np.linspace(x[0], x[1], num), np.linspace(y[0], y[1], num)
#coor_X_export = np.sort(coor_X_export)
#coor_Y_export = np.sort(coor_Y_export)
return coor_X_export,coor_Y_export, profiles_data_X, profiles_data_Y
def boundary_rectangles(self, x, y):
"""
IN : x[0,1], y[0,1]: positions of the line cut (arbitrary direction)
OUT: ROI rectangle: the rectangle in which the data will be taken
Bound rectangle: the limit values for Qx, Qz line cuts (min, max)
"""
x = np.asarray(x)
y = np.asarray(y)
alpha = np.arctan(abs((y[1]-y[0])/(x[1]-x[0]))) # inclined angle of the ROI w.r.t the horizontal line. Attention to the sign of alpha
#print np.degrees(alpha)
T = self.largueur_int/2.
if np.degrees(alpha)>55.0:
inc_x = 1
inc_y = 0
else:
inc_x = 0
inc_y = 1
y1 = y + T*inc_y
y2 = y - T*inc_y
x1 = x + T*inc_x
x2 = x - T*inc_x
#These positions are in reciprocal space units. The boundary order will be: 1-2-2-1
roi_rect = [[y1[0],x1[0]],[y2[0],x2[0]],[y2[1],x2[1]],[y1[1],x1[1]],[y1[0],x1[0]]]
roi_rect = path.Path(roi_rect)
#***************** Get the corresponding index of these points ***************************
i1,j1 = xu.analysis.line_cuts.getindex(x1[0], y1[0], self.gridder.xaxis, self.gridder.yaxis)
i2,j2 = xu.analysis.line_cuts.getindex(x2[0], y2[0], self.gridder.xaxis, self.gridder.yaxis)
i3,j3 = xu.analysis.line_cuts.getindex(x2[1], y2[1], self.gridder.xaxis, self.gridder.yaxis)
i4,j4 = xu.analysis.line_cuts.getindex(x1[1], y1[1], self.gridder.xaxis, self.gridder.yaxis)
roi_box = [[j1,i1],[j2,i2],[j3,i3],[j4,i4],[j1,i1]]
roi_box = path.Path(roi_box)
#******* Calculate the limit boundary rectangle
y_tmp = np.vstack((y1, y2))
x_tmp = np.vstack((x1, x2))
y_min = y_tmp.min()
y_max = y_tmp.max()
x_min = x_tmp.min()
x_max = x_tmp.max()
bound_rect = [x_min, x_max, y_min, y_max]
bound_rect = np.asarray(bound_rect)
contours = roi_rect.vertices
p=self.ax.plot(contours[:,1], contours[:,0], linewidth=1.5, color='white')
self.polygons.append(p[0])
self.canvas.draw()
return roi_box, bound_rect
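# extract_roi_data projects the intensity inside the rotated ROI onto Qx and
# Qz: points outside the ROI polygon are zeroed row by row, then the sums are
# divided by the number of rows/columns, so the returned projections are
# averages over the bounding box rather than raw sums.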
def extract_roi_data(self, roi_box, bound_rect):
#***** Extraction of the ROI defined by the ROI box ******************
qx_min = bound_rect[0]
qx_max = bound_rect[1]
qz_min = bound_rect[2]
qz_max = bound_rect[3]
#***** Getting index of the boundary points in order to calculate the length of the extracted array
ixmin, izmin = xu.analysis.line_cuts.getindex(qx_min, qz_min, self.gridder.xaxis, self.gridder.yaxis)
ixmax, izmax = xu.analysis.line_cuts.getindex(qx_max, qz_max, self.gridder.xaxis, self.gridder.yaxis)
x_steps = ixmax - ixmin +1
z_steps = izmax - izmin +1
qx_coor = np.linspace(qx_min, qx_max, x_steps)
qz_coor = np.linspace(qz_min, qz_max, z_steps)
ROI = np.zeros(shape=(x_steps))
#****** Extract Qx line cuts ************************
for zi in range(izmin, izmax+1):
qx_int = self.gridder.data[ixmin:ixmax+1,zi].copy() #copy: zeroing below must not modify the gridded data in place
#****** if the point is inside the ROI box: point = 0
inpoints = []
for i in range(ixmin,ixmax+1):
inpoint= roi_box.contains_point([zi,i])
inpoints.append(inpoint)
for b in range(len(inpoints)):
if inpoints[b]==False:
qx_int[b] = 0
ROI = np.vstack((ROI, qx_int))
ROI = np.delete(ROI, 0, 0) #Delete the first line which contains zeros
#****** Sum them up! Return Qx, Qz projection zones and Qx,Qz intensity
qx_ROI = ROI.sum(axis=0)/ROI.shape[0]
qz_ROI = ROI.sum(axis=1)/ROI.shape[1]
return qx_coor, qx_ROI, qz_coor, qz_ROI
def plot_profiles(self, x, y, cross_line=True):
if cross_line:
"""Drawing lines where I want to plot profiles"""
# ******** if this is not an arbitrary profile, x and y are not lists but just one individual point
x=x[0]
y=y[0]
hline = self.ax.axhline(y, color='k', ls='--', lw=1)
self.lines.append(hline)
vline = self.ax.axvline(x, color='k', ls='--', lw=1)
self.lines.append(vline)
"""Getting data to be plotted"""
self.coor_X_export, self.profiles_data_X = xu.analysis.line_cuts.get_qx_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, y, qrange=self.largueur_int)
self.coor_Y_export, self.profiles_data_Y = xu.analysis.line_cuts.get_qz_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, x, qrange=self.largueur_int)
xc = x
yc = y
""" Fitting information """
ix,iy = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
ix_left,iy = xu.analysis.line_cuts.getindex(x-self.fitting_width, y, self.gridder.xaxis, self.gridder.yaxis)
qx_2_fit = self.coor_X_export[ix_left:ix*2-ix_left+1]
qx_int_2_fit = self.profiles_data_X[ix_left:2*ix-ix_left+1]
X_fitted_params, X_fitted_data = fit(qx_2_fit, qx_int_2_fit,xc, cross_line)
####################axX.plot(qx_2_fit, qx_fit_data, color='red',linewidth=2)
ix,iy_down = xu.analysis.line_cuts.getindex(x, y-self.fitting_width, self.gridder.xaxis, self.gridder.yaxis)
qz_2_fit = self.coor_Y_export[iy_down:iy*2-iy_down+1]
qz_int_2_fit = self.profiles_data_Y[iy_down:iy*2-iy_down+1]
Y_fitted_params, Y_fitted_data = fit(qz_2_fit, qz_int_2_fit,yc, cross_line)
####################axY.plot(qz_2_fit, qz_fit_data, color='red',linewidth=2)
else:
#**** extract arbitrary line cut
#**** extract one single line cut:
if not self.rectangle_profiles_btn.get_active():
self.coor_X_export, self.coor_Y_export, self.profiles_data_X, self.profiles_data_Y = self.arbitrary_line_cut(x,y)
else:
roi_box,bound_rect = self.boundary_rectangles(x,y)
self.coor_X_export, self.profiles_data_X, self.coor_Y_export, self.profiles_data_Y = self.extract_roi_data(roi_box, bound_rect)
tmpX = np.sort(self.coor_X_export)
tmpY = np.sort(self.coor_Y_export)
xc = tmpX[self.profiles_data_X.argmax()]
yc = tmpY[self.profiles_data_Y.argmax()]
""" Fitting information """
X_fitted_params, X_fitted_data = fit(self.coor_X_export, self.profiles_data_X, xc, not cross_line)
Y_fitted_params, Y_fitted_data = fit(self.coor_Y_export, self.profiles_data_Y, yc, not cross_line)
qx_2_fit = self.coor_X_export
qz_2_fit = self.coor_Y_export
""" Plotting profiles """
self.profiles_ax1.cla()
self.profiles_ax2.cla()
self.profiles_ax1.format_coord = self.pro_format_coord
self.profiles_ax2.format_coord = self.pro_format_coord
#self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
#self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
self.profiles_ax1.plot(self.coor_Y_export, self.profiles_data_Y, color='blue', lw=3)
self.profiles_ax1.plot(qz_2_fit, Y_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax2.plot(self.coor_X_export, self.profiles_data_X, color='blue', lw=3)
self.profiles_ax2.plot(qx_2_fit, X_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas.draw()
# Show the fitted results
self.Qz_fitted_y0.set_text("%.4f"%Y_fitted_params['y0'].value)
self.Qz_fitted_xc.set_text("%.4f"%Y_fitted_params['xc'].value)
self.Qz_fitted_A.set_text("%.4f"%Y_fitted_params['A'].value)
self.Qz_fitted_w.set_text("%.4f"%Y_fitted_params['w'].value)
self.Qz_fitted_mu.set_text("%.4f"%Y_fitted_params['mu'].value)
self.Qx_fitted_y0.set_text("%.4f"%X_fitted_params['y0'].value)
self.Qx_fitted_xc.set_text("%.4f"%X_fitted_params['xc'].value)
self.Qx_fitted_A.set_text("%.4f"%X_fitted_params['A'].value)
self.Qx_fitted_w.set_text("%.4f"%X_fitted_params['w'].value)
self.Qx_fitted_mu.set_text("%.4f"%X_fitted_params['mu'].value)
self.profiles_refresh()
self.canvas.draw()
def draw_pointed(self, x, y, finished=False):
#if len(self.lines)>0:
# self.clear_notes()
p=self.ax.plot(x,y,'ro')
self.points.append(p[0])
if finished:
l=self.ax.plot(self.arb_lines_X, self.arb_lines_Y, '--',linewidth=1.5, color='white')
self.lines.append(l[0])
self.canvas.draw()
def profiles_refresh(self):
""" """
if self.profiles_log_btn.get_active():
self.profiles_ax1.set_yscale('log')
self.profiles_ax2.set_yscale('log')
else:
self.profiles_ax1.set_yscale('linear')
self.profiles_ax2.set_yscale('linear')
self.profiles_canvas.draw()
#return
def profiles_update(self, widget):
self.profiles_refresh()
def profiles_export(self,widget):
""" Export X,Y profiles data in the same folder as the EDF image """
proX_fname = self.rsm.split(".")[0]+"_Qx_profile.dat"
proY_fname = self.rsm.split(".")[0]+"_Qz_profile.dat"
proX_export= np.vstack([self.coor_X_export, self.profiles_data_X])
proX_export=proX_export.T
proY_export= np.vstack([self.coor_Y_export, self.profiles_data_Y])
proY_export=proY_export.T
try:
np.savetxt(proX_fname, proX_export)
np.savetxt(proY_fname, proY_export)
self.popup_info('info','Data are successfully exported!')
except:
self.popup_info('error','ERROR! Data not exported!')
def on_press(self, event):
#******************** Plot X,Y cross profiles ***************************************************
if (event.inaxes == self.ax) and (event.button==3) and self.plotXYprofiles_btn.get_active():
x = event.xdata
y = event.ydata
xx=[]
yy=[]
xx.append(x)
yy.append(y)
self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
self.plot_profiles(xx,yy,cross_line=True)
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
#******************** Plot arbitrary profiles ***************************************************
elif (event.inaxes == self.ax) and (event.button==1) and (self.arbitrary_profiles_btn.get_active() or self.rectangle_profiles_btn.get_active()):
#self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
self.arb_line_points +=1
#print "Number of points clicked: ",self.arb_line_points
if self.arb_line_points>2:
self.clear_notes()
self.arb_line_points=1
x = event.xdata
y = event.ydata
self.arb_lines_X.append(x)
self.arb_lines_Y.append(y)
if len(self.arb_lines_X)<2:
finished=False
elif len(self.arb_lines_X)==2:
finished = True
self.draw_pointed(x,y,finished)#If finished clicking, connect the two points by a line
if finished:
self.plot_profiles(self.arb_lines_X, self.arb_lines_Y, cross_line=False)
self.arb_lines_X=[]
self.arb_lines_Y=[]
#self.canvas.draw()
#******************** Clear cross lines in the main image ****************************************
elif event.button==2:
self.clear_notes()
def profile_press(self, event):
""" Calculate thickness fringes """
if event.inaxes == self.profiles_ax1:
draw_fringes = True
ax = self.profiles_ax1
X_data = self.coor_Y_export
Y_data = self.profiles_data_Y
xlabel = r'$Q_z (nm^{-1})$'
title = "Linear regression of Qz fringes"
title_FFT = "Fast Fourier Transform of Qz profiles"
xlabel_FFT= "Period (nm)"
elif event.inaxes == self.profiles_ax2:
draw_fringes = True
ax = self.profiles_ax2
X_data = self.coor_X_export
Y_data = self.profiles_data_X
xlabel = r'$Q_x (nm^{-1})$'
title = "Linear regression of Qx fringes"
title_FFT = "Fast Fourier Transform of Qx profiles"
xlabel_FFT= "Period (nm)"
else:
draw_fringes = False
if draw_fringes and (event.button==1):
if len(self.profiles_fringes)>0:
self.profiles_fringes = np.asarray(self.profiles_fringes)
self.profiles_fringes = np.sort(self.profiles_fringes)
fringes_popup = PopUpFringes(self.profiles_fringes, xlabel, "Fringes order", title)
self.profiles_fringes=[]
self.clear_notes()
elif draw_fringes and (event.button == 3):
vline=ax.axvline(event.xdata, linewidth=2, color="green")
self.lines.append(vline)
self.profiles_fringes.append(event.xdata)
elif draw_fringes and event.button == 2:
XF,YF = Fourier(X_data, Y_data)
popup_window=PopUpImage(XF, YF, xlabel_FFT, "Normalized intensity", title_FFT)
self.profiles_canvas.draw()
#plt.clf()
def clear_notes(self):
"""
print "Number of notes: ",len(self.my_notes)
print "Number of lines: ",len(self.lines)
print "Number of points: ",len(self.points)
print "Number of polygons: ",len(self.polygons)
"""
if len(self.my_notes)>0:
for txt in self.my_notes:
txt.remove()
if len(self.lines)>0:
for line in self.lines:
line.remove()
if len(self.points)>0:
for p in self.points:
p.remove()
if len(self.polygons)>0:
for p in self.polygons:
p.remove()
self.canvas.draw()
self.my_notes = []
#self.profiles_notes = []
self.lines=[]
self.points=[]
self.polygons=[]
self.arb_lines_X=[]
self.arb_lines_Y=[]
self.arb_line_points = 0
def on_motion(self,event):
print "Mouse moved !"
if event.inaxes == self.ax and self.arbitrary_profiles_btn.get_active() and self.arb_line_points==1:
x = event.xdata
y = event.ydata
self.clear_notes()
line = self.ax.plot([self.arb_lines_X[0], x], [self.arb_lines_Y[0],y], 'ro-')
self.lines.append(line)
self.canvas.draw()
def on_release(self, event):
if event.inaxes == self.ax:
if self.mouse_moved==True:
self.mouse_moved = False
def popup_info(self,info_type,text):
""" info_type = WARNING, INFO, QUESTION, ERROR """
if info_type.upper() == "WARNING":
mess_type = gtk.MESSAGE_WARNING
elif info_type.upper() == "INFO":
mess_type = gtk.MESSAGE_INFO
elif info_type.upper() == "ERROR":
mess_type = gtk.MESSAGE_ERROR
elif info_type.upper() == "QUESTION":
mess_type = gtk.MESSAGE_QUESTION
self.warning=gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, mess_type, gtk.BUTTONS_CLOSE,text)
self.warning.run()
self.warning.destroy()
#********************************************************************
# Functions for the Spec-HDF5 data conversion
#********************************************************************
def select_file(self,widget,path,label):
dialog = gtk.FileChooserDialog("Select file",None,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response = dialog.run()
if response == gtk.RESPONSE_OK:
file_choosen = dialog.get_filename()
path.set_text(file_choosen)
self.DATA_current_folder = os.path.dirname(file_choosen)
if label == "A":
self.attenuation_file = file_choosen.decode('utf8')
elif label == "S":
self.spec_file = file_choosen.decode('utf8')
elif label == "M":
self.mca_file = file_choosen.decode('utf8')
else:
pass
dialog.destroy()
def select_folder(self, widget, path, label):
dialog = gtk.FileChooserDialog(title="Select folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
path.set_text(folder)
self.DATA_current_folder = folder.decode('utf8')
if label == "D":
self.des_folder = folder.decode('utf8')
else:
pass
dialog.destroy()
def HKL2Q(self,H,K,L,a):
""" Q// est dans la direction [110], Qz // [001]"""
Qx = H*np.sqrt(2.)/a
Qy = K*np.sqrt(2.)/a
Qz = L/a
return [Qx, Qy, Qz]
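# Example (sketch): for a Si substrate (a ~ 0.5431 nm) the symmetric 004
# reflection maps to Qz = 4/0.5431 ~ 7.37 nm^-1 with Qx = Qy = 0:
#   Qx, Qy, Qz = self.HKL2Q(0, 0, 4, 0.5431)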
def loadAmap(self,scanid,specfile,mapData,retard):
try:
psdSize = float(self.t1_entry.get_text())
Nchannels = int(self.t2_entry.get_text())
psdMin = int(self.t5_entry1.get_text())
psdMax = int(self.t5_entry2.get_text())
psd0 = float(self.t3_entry.get_text())
pixelSize = psdSize/Nchannels
pixelPerDeg = float(self.t4_entry.get_text())
distance = pixelSize * pixelPerDeg / np.tan(np.radians(1.0)) # sample-detector distance in mm
psdor = self.t6_entry.get_active() #psd orientation (up, down, in, out)
if psdor == 0:
psdor = 'z+'
elif psdor == 1:
psdor = 'z-'
else:
psdor = 'unknown'
energy = float(self.e2_entry.get_text())
filter_data = self.attenuation_file
monitor_col = self.e5_entry.get_text()
foil_col = self.e4_entry.get_text()
monitor_ref = float(self.e6_entry.get_text())
#****************** Calculation ************************
headers, scan_kappa = SP.ReadSpec(specfile,scanid)
Eta = scan_kappa['Eta']
print Eta.shape
tth = headers['P'][0]
omega = headers['P'][1]
tth = float(tth)
omega = float(omega)
print "Del: %.2f, Eta: %.2f"%(tth,omega)
#Si = xu.materials.Si
hxrd = xu.HXRD(self.substrate.Q(self.in_plane), self.substrate.Q(self.out_of_plane), en = energy)
hxrd.Ang2Q.init_linear(psdor,psd0, Nchannels, distance=distance, pixelwidth=pixelSize, chpdeg=pixelPerDeg)
HKL = hxrd.Ang2HKL(omega, tth)
HKL = np.asarray(HKL)
HKL = HKL.astype(int)
print "HKL = ",HKL
H=K=L=np.zeros(shape=(0,Nchannels))
for i in range(len(Eta)):
om=Eta[i]
q=hxrd.Ang2HKL(om,tth,mat=self.substrate,dettype='linear')
H = np.vstack((H,q[0]))
K = np.vstack((K,q[1]))
L = np.vstack((L,q[2]))
filtre_foil = scan_kappa[foil_col]
filtre = filtre_foil.copy()
monitor= scan_kappa[monitor_col]
foil_data = np.loadtxt(filter_data)
for f in xrange(foil_data.shape[0]):
coef = filtre_foil == f
filtre[coef] = foil_data[f,1]
#print filtre
mapData = mapData + 1e-6
if retard:
for i in range(len(filtre)-1):
mapData[i+1] = mapData[i+1]*filtre[i]
else:
for i in range(len(filtre)):
mapData[i] = mapData[i]*filtre[i]
for i in range(len(monitor)):
mapData[i] = mapData[i]*monitor_ref/monitor[i]
mapData = mapData[:,psdMin:psdMax]
H = H[:,psdMin:psdMax]
K = K[:,psdMin:psdMax]
L = L[:,psdMin:psdMax]
########## Offset correction ###############
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = HKL[0] - H_sub
K_offset = HKL[1] - K_sub
L_offset = HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
Q = self.HKL2Q(H, K, L, a)
return Q,mapData
except:
self.popup_info("warning", "Please make sure that you have correctly entered the all parameters.")
return None,None
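# gtk_waiting flushes the pending GTK event queue so the progress labels can
# repaint while a long conversion runs in the main thread (this GUI does not
# use worker threads).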
def gtk_waiting(self):
while gtk.events_pending():
gtk.main_iteration()
def Change_Lab_Instrument(self, widget):
self.choosen_instrument = self.Instrument.get_active_text()
print "I choose ",self.choosen_instrument
if self.choosen_instrument == "Bruker":
self.XRDML_xrdml_file_txt.set_text("Select RAW file: ")
self.XRDML_xrdml_file_browse.set_label("Browse RAW file")
elif self.choosen_instrument == "PANalytical":
self.XRDML_xrdml_file_txt.set_text("Select XRDML file: ")
self.XRDML_xrdml_file_browse.set_label("Browse XRDML file")
def Convert_Lab_Source(self, widget):
print "Instrument chosen: ",self.choosen_instrument
energy = self.XRDML_energy.get_text()
if energy == "":
energy = 8048 #default: Cu K_alpha1 (~8047.8 eV)
else:
energy = float(energy)
self.lam = xu.lam2en(energy)/10 #hc/E: feeding an energy (eV) yields the wavelength in Angstrom; /10 -> nm
HKL = self.XRDML_reflection.get_text()
if HKL == "":
self.offset_correction = False
else:
self.offset_correction = True
HKL = HKL.split()
HKL = np.asarray([int(i) for i in HKL])
self.HKL = HKL
substrate = self.XRDML_substrate.get_active_text()
if substrate == "-- other":
substrate = self.XRDML_substrate_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
in_plane = self.XRDML_substrate_inplane.get_text()
out_of_plane = self.XRDML_substrate_outplane.get_text()
if in_plane != "" and out_of_plane != "":
in_plane = in_plane.split()
self.in_plane = np.asarray([int(i) for i in in_plane])
out_of_plane = out_of_plane.split()
self.out_of_plane = np.asarray([int(i) for i in out_of_plane])
self.has_orientation_matrix = True
self.experiment = xu.HXRD(self.substrate.Q(self.in_plane),self.substrate.Q(self.out_of_plane), en=energy)
else:
self.has_orientation_matrix = False
self.experiment = xu.HXRD(self.substrate.Q(1,1,0),self.substrate.Q(0,0,1), en=energy)
if self.choosen_instrument == "Bruker":
self.Bruker2HDF()
elif self.choosen_instrument == "PANalytical":
self.XRDML2HDF()
def XRDML2HDF(self):
try:
xrdml_file = self.spec_file
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading XRDML data ...")
self.gtk_waiting()
dataFile = xu.io.XRDMLFile(xrdml_file)
scan = dataFile.scan
omega_exp = scan['Omega']
tth_exp = scan['2Theta']
data = scan['detector']
if self.has_orientation_matrix:
omega,tth,psd = xu.io.getxrdml_map(xrdml_file)
[qx,qy,qz] = self.experiment.Ang2Q(omega, tth)
mapData = psd.reshape(data.shape)
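# Note qx is (near) zero in this coplanar geometry; H and K are both taken from
# qy below, mirroring the H = K convention of the symmetric branch further down.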
H = qy.reshape(data.shape)
K = qy.reshape(data.shape)
L = qz.reshape(data.shape)
else:
mapData = data
psi = omega_exp - tth_exp/2.
Qmod = 2.*np.sin(np.radians(tth_exp/2.))/self.lam
Qx = Qmod * np.sin(np.radians(psi))
Qz = Qmod * np.cos(np.radians(psi))
H = K = Qx*a/np.sqrt(2.0)
L = Qz*a
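# Without an orientation matrix the scattering vector is split along psi: Qx is
# the in-plane and Qz the out-of-plane component. H = K assumes the in-plane
# direction is [110] of a cubic lattice (hence the sqrt(2)), with L along [001].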
########## Offset correction ###############
if self.offset_correction:
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = self.HKL[0] - H_sub
K_offset = self.HKL[1] - K_sub
L_offset = self.HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
Q = self.HKL2Q(H, K, L, a)
self.XRDML_show_info.set_text("XRDML data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "XRDML_Map"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
del_file = "rm -f %s"%h5file
os.system(del_file)
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=mapData, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Bruker2HDF(self):
try:
raw_file = self.spec_file
from MCA_GUI.Bruker import convert_raw_to_uxd,get_Bruker
uxd_file = os.path.splitext(raw_file)[0]+".uxd" #splitext is safe when directory names contain dots
convert_raw_to_uxd(raw_file, uxd_file)
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading Raw data ...")
self.gtk_waiting()
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
dataset = get_Bruker(uxd_file)
theta = dataset['omega']
dTheta = dataset['tth']
Qhkl = self.experiment.Ang2HKL(theta, dTheta)
Qx,Qy,Qz = Qhkl[0],Qhkl[1],Qhkl[2]
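# Ang2HKL maps the goniometer angles directly to Miller indices of the
# substrate lattice, so Qhkl already holds full (H, K, L) maps for the scan.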
########## Offset correction ###############
if self.offset_correction:
x,y=np.unravel_index(np.argmax(dataset['data']),dataset['data'].shape)
Hsub = Qhkl[0][x,y]
Ksub = Qhkl[1][x,y]
Lsub = Qhkl[2][x,y]
Qx = Qhkl[0]+self.HKL[0]-Hsub
Qy = Qhkl[1]+self.HKL[1]-Ksub
Qz = Qhkl[2]+self.HKL[2]-Lsub
Q = self.HKL2Q(Qx, Qy, Qz, a)
self.XRDML_show_info.set_text("Raw data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "RSM"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file)
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=dataset['data'], compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def spec2HDF(self,widget):
try:
specfile = self.spec_file
mcafile = self.mca_file
scan_beg = int(self.c4_entry1.get_text())
scan_end = int(self.c4_entry2.get_text())
substrate = self.e1_entry.get_active_text()
if substrate == "-- other":
substrate = self.e1_entry_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
scanid = range(scan_beg, scan_end+1)
self.show_info.set_text("Reading MCA data ...")
self.gtk_waiting()
allMaps = SP.ReadMCA2D_complete(mcafile)
description = self.c5_entry1.get_text()
retard = self.c6_entry.get_active()
total = len(allMaps)
total_maps_loaded = "Number of map(s) loaded: %d"%total
self.show_info.set_text(total_maps_loaded)
self.gtk_waiting()
if description == "":
no_description = True
else:
description = description.split(",")
no_description = False
for i in range(len(allMaps)):
scannumber = scanid[i]
scan_name = "Scan_%d"%scannumber
if no_description:
h5file = scan_name+".h5"
d = scan_name
else:
h5file = description[i].strip()+".h5"
d = description[i].strip()
info = "\nSaving file N# %d/%d: %s"%(i+1,total,h5file)
out_info = total_maps_loaded + info
self.show_info.set_text(out_info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file)
h5file = h5.File(h5file,"w")
Q,mapdata = self.loadAmap(scannumber, specfile, allMaps[i], retard)
s = h5file.create_group(scan_name)
s.create_dataset('intensity', data=mapdata, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=d)
h5file.close()
self.popup_info("info","Data conversion completed!")
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Export_HQ_Image(self, widget):
dialog = gtk.FileChooserDialog(title="Save image", action=gtk.FILE_CHOOSER_ACTION_SAVE, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
filename = self.rsm_choosen.split(".")[0] if self.rsm_choosen != "" else "Img"
dialog.set_current_name(filename+".png")
#dialog.set_filename(filename)
dialog.set_current_folder(self.GUI_current_folder)
filtre = gtk.FileFilter()
filtre.set_name("images")
filtre.add_pattern("*.png")
filtre.add_pattern("*.jpg")
filtre.add_pattern("*.pdf")
filtre.add_pattern("*.ps")
filtre.add_pattern("*.eps")
dialog.add_filter(filtre)
filtre = gtk.FileFilter()
filtre.set_name("Other")
filtre.add_pattern("*")
dialog.add_filter(filtre)
response = dialog.run()
if response==gtk.RESPONSE_OK:
#self.fig.savefig(dialog.get_filename())
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
fig = plt.figure(figsize=(10,8),dpi=100)
ax = fig.add_axes([0.12,0.2,0.7,0.7])
cax = fig.add_axes([0.85,0.2,0.03,0.7])
clabel = r'$Intensity\ (Counts\ per\ second)$'
fmt = "%d"
if self.linear_scale_btn.get_active():
clabel = r'$Log_{10}\ (Intensity)\ [arb.\ units]$'
fmt = "%.2f"
data = self.gridder.data.T
data = flat_data(data, self.vmin, self.vmax, self.linear_scale_btn.get_active())
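# flat_data (defined elsewhere in this module) presumably clips the map to
# [vmin, vmax] and applies the log scaling toggled above.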
img = ax.contourf(self.gridder.xaxis, self.gridder.yaxis, data, 100, vmin=self.vmin*1.1, vmax=self.vmax)
cb = fig.colorbar(img,cax=cax, format=fmt)
cb.set_label(clabel, fontsize=20)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.yaxis.label.set_size(20)
ax.xaxis.label.set_size(20)
ax.set_title(self.rsm_description,fontsize=20)
fig.savefig(dialog.get_filename())
plt.close()
dialog.destroy()
if __name__=="__main__":
MyMainWindow()
gtk.main()
| gpl-2.0 |
nguyentu1602/numpy | numpy/core/tests/test_multiarray.py | 5 | 221306 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
#check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianness in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# using None is known to fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.partition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
            d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
            d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
            d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
            d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
            # median-of-3 killer sequence: O(n^2) for quickselect with a
            # pure median-of-3 pivot; exercises the median-of-medians-of-5
            # code that keeps the worst case O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
            d = np.arange(47) % 7
            tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
                for row in d1:
                    np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
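    # Illustrative sketch (not part of the original tests) of the invariant
    # that assert_partitioned checks: np.partition(d, k) only guarantees
    # that the k-th smallest element lands at index k, with <= values
    # before it and >= values after it; neither side is sorted.
    def _demo_partition_invariant(self):
        d = np.array([9, 1, 8, 2, 7, 3])
        p = np.partition(d, 2)
        assert_equal(p[2], 3)  # the third-smallest value
        assert_((p[:2] <= p[2]).all())
        assert_((p[3:] >= p[2]).all())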
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
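    # Illustrative sketch (not part of the original tests): flatten always
    # returns a copy, whereas ravel returns a view whenever possible.
    def _demo_flatten_vs_ravel(self):
        x = np.arange(4).reshape(2, 2)
        assert_(x.flatten().flags.owndata)    # always a copy
        assert_(not x.ravel().flags.owndata)  # view of contiguous data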
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
        # this test is only for 1.9; the diagonal view will be
        # writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
        # check that the target array must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
        # Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # General case of a possible ravel that is not contiguous but
        # still works, including a 1-sized axis with non-matching stride
a = a.swapaxes(-1, -2) # swap back to C-order
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
a = a.T # swap all to Fortran order
assert_(np.may_share_memory(a.ravel(order='F'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
# Test negative strides:
a = np.arange(4)[::-1].reshape(2, 2)
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
        # Test order 'K' with weirdly strided 1-sized dims (1-d first stride)
a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order
strides = list(a.strides)
strides[0] = -12
strides[-1] = 0
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), a.ravel('C'))
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
        # Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
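    # Illustrative sketch (not part of the original tests): 'C' and 'F'
    # denote logical row- and column-major order, 'A' follows 'F' only for
    # Fortran-contiguous input, and 'K' follows the memory layout.
    def _demo_ravel_orders(self):
        a = np.array([[0, 1], [2, 3]], order='F')
        assert_equal(a.ravel('C'), [0, 1, 2, 3])  # logical row-major
        assert_equal(a.ravel('F'), [0, 2, 1, 3])  # logical column-major
        assert_equal(a.ravel('K'), [0, 2, 1, 3])  # memory order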
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
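    # Illustrative sketch (not part of the original tests): swapaxes
    # returns a view (gh-5260), so writes through the result are visible
    # in the original array.
    def _demo_swapaxes_is_view(self):
        a = np.zeros((2, 3))
        v = a.swapaxes(0, 1)  # shape (3, 2), shares memory with a
        v[0, 1] = 7
        assert_equal(a[1, 0], 7)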
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
        # test extensions (e.g., Cython) calling PyNumber_* slots without
        # increasing the reference counts
        #
        # def incref_elide(a):
        #     d = a.copy()       # refcount 1
        #     return d, d + d    # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original must not be modified by the in-place optimization
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning if the refcount == 1 object is on the python stack to check
# that we are called directly from python is flawed as object may still
# be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original must not be modified by the in-place optimization
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
                # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
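    # Illustrative sketch (not part of the original tests), assuming the
    # interim __numpy_ufunc__ protocol exercised above (later releases
    # replaced it with __array_ufunc__): any object defining the hook
    # intercepts ufunc calls regardless of its argument position.
    def _demo_minimal_ufunc_override(self):
        class Loud(object):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                return ufunc.__name__
        assert_equal(np.add(Loud(), np.arange(3)), 'add')
        assert_equal(np.add(np.arange(3), Loud()), 'add')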
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
    def test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
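        # Note on the idiom above: a.transpose(i, *axes) moves axis i to
        # the front, so choose() uses the argmax indices to pick, at each
        # remaining position, the element they select; the result must
        # therefore equal a.max(i).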
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
        a = np.random.normal(size=(2, 3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous cases used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
        a = np.random.normal(size=(2, 3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
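    # Illustrative sketch (not part of the original tests): unlike boolean
    # assignment, np.putmask tiles `values` to the array's size and indexes
    # it by flat position, so position n receives values[n % len(values)].
    def _demo_putmask_value_cycling(self):
        x = np.arange(5)
        np.putmask(x, x > 1, [-1, -2])
        assert_array_equal(x, [0, 1, -1, -2, -1])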
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
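    # Note: mode='raise' (the default) rejects out-of-bounds indices,
    # mode='clip' maps them to the nearest valid index, and mode='wrap'
    # reduces them modulo the axis length, as the two tests above show.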
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
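    # Illustrative sketch (not part of the original tests): lexsort sorts
    # by the *last* key first, so np.lexsort((b, a)) means "order by a,
    # breaking ties with b".
    def _demo_lexsort_key_order(self):
        a = [1, 1, 0]
        b = [9, 2, 5]
        assert_equal(np.lexsort((b, a)), [2, 1, 0])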
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. converting via str keeps less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
        assert_array_equal(np.frombuffer(buffer, **kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
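# ndarray.resize changes an array in place and, unlike np.resize, by
# default refuses to act when other Python objects reference the same
# buffer (refcheck=True); test_check_reference exercises exactly that.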
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
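# each (r, g, b, a) record packs into one little-endian int32:
# bytes (1, 2, 3, 4) -> 0x04030201 == 67305985 and
# bytes (5, 6, 7, 8) -> 0x08070605 == 134678021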
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
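# var divides the sum of squared deviations by (N - ddof) and std by
# its square root, so rescaling by (N - ddof), or sqrt(N - ddof) for
# std, recovers the ddof=0 result times N (or sqrt(N)):
#   var(x, ddof=d) * (N - d) == var(x, ddof=0) * N == sum((x - x.mean())**2)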
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
assert_almost_equal(res, tgt)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
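# np.vdot flattens both operands and conjugates the first, i.e.
# vdot(a, b) == (a.conj().ravel() * b.ravel()).sum(); for the identity
# scaled by 1j below, conj(1j)*1j == 1 on each of the three diagonal
# entries, hence the expected scalar 3.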
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
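# matmul differs from dot in the cases exercised below: scalar operands
# are rejected outright, and stacks of matrices broadcast over the
# leading dimensions instead of combining all leading axes the way dot
# does.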
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1,)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(mat1, vec)
assert_equal(res, tgt1)
res = self.matmul(mat2, vec)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# The following tests are skipped for now: cblas does not allow
# non-contiguous outputs, and consistency with dot would require the
# same type, dimensions, subtype, and C-contiguity.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself used to segfault or give a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
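# The neighborhood iterator tests drive C helpers from
# numpy.core.multiarray_tests: bounds such as [-1, 1] describe a window
# from i-1 to i+1 around each element, and the NEIGH_MODE constant
# selects how out-of-bounds positions are filled (constant value,
# circular wrap-around, or mirroring).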
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside
# the array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
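# Reference for the PEP 3118 format strings checked below: '@' selects
# native byte order *with* native alignment (implicit padding), '=' and
# '^' select native order without alignment, 'T{...}' nests a struct,
# a count prefix repeats an item (e.g. '3x' is three pad bytes), and on
# the numpy side 'V<n>' denotes n bytes of void padding.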
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
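# __array_interface__ is the dict-based array protocol: numpy reads
# 'typestr' and, when present, 'shape' from the dict; with no shape
# (and no data) the object falls back to ordinary scalar coercion,
# here via __float__.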
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where(c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
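# Illustrative sketch (not part of the original test suite), mirroring
# TestWhere.test_exotic above: np.where keeps the array's narrow dtype when
# the scalar fits, and upcasts only when it does not.
def _demo_where_dtype():
    d = np.array([1., 2.], dtype=np.float32)
    assert np.where(True, d, float('nan')).dtype == np.float32  # NaN fits
    assert np.where(True, d, 1e150).dtype == np.float64  # 1e150 does not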
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
def test_view_safety(self):
psize = np.dtype('p').itemsize
# creates a dtype from s, where the extra character code '-' marks a missing pointer-sized ('p') field
def mtype(s):
n, offset, fields = 0, 0, []
for c in s.split(','): # subarrays won't work
if c != '-':
fields.append(('f{0}'.format(n), c, offset))
n += 1
offset += np.dtype(c).itemsize if c != '-' else psize
names, formats, offsets = zip(*fields)
return np.dtype({'names': names, 'formats': formats,
'offsets': offsets, 'itemsize': offset})
# test nonequal itemsizes with objects:
# these should succeed:
_view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))
_view_is_safe(np.dtype('O,O'), np.dtype('O,O,O'))
# these should fail:
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O'))
# test nonequal itemsizes with missing fields:
# these should succeed:
_view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
_view_is_safe(np.dtype('p,p'), np.dtype('p,p,p'))
# these should fail:
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
# scans through positions at which we can view a type
def scanView(d1, otype):
goodpos = []
for shift in range(d1.itemsize - np.dtype(otype).itemsize+1):
d2 = np.dtype({'names': ['f0'], 'formats': [otype],
'offsets': [shift], 'itemsize': d1.itemsize})
try:
_view_is_safe(d1, d2)
except TypeError:
pass
else:
goodpos.append(shift)
return goodpos
# test partial overlap with object field
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'),
[psize, 4*psize, 5*psize])
# test partial overlap with missing field
assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
# test nested structures with objects:
nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')])
assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
assert_equal(scanView(nestedO, 'O'), [2*psize])
# test nested structures with missing fields:
nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
# test subarrays with objects
subarrayO = np.dtype('p,(2,3)O,p')
assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
assert_equal(scanView(subarrayO, 'O'),
list(range(psize, 6*psize+1, psize)))
#test dtype with overlapping fields
overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'],
'formats': ['p', 'p', 'p', 'p'],
'offsets': [0, 1, 3*psize-1, 3*psize],
'itemsize': 4*psize})
assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/cross_validation.py | 5 | 57208 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
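# Illustrative sketch (not part of the original module): the whole contract a
# _PartitionIterator subclass must satisfy is to yield test indices (or
# masks); the base class derives the training set as the complement. The
# class below is hypothetical.
class _EveryOtherSample(_PartitionIterator):
    """Toy CV iterator: one split putting every other sample in the test set."""
    def _iter_test_indices(self):
        yield np.arange(0, self.n, 2)
# for train, test in _EveryOtherSample(6): train is [1 3 5], test is [0 2 4]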
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct subsets of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
Each fold is then used as a validation set once while the k - 1 remaining
folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
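# Illustrative sketch (not part of the original module), spelling out the
# fold-size arithmetic from the Notes above: for n=10 and n_folds=3 the
# first n % n_folds = 1 fold gets one extra sample.
def _demo_kfold_sizes():
    sizes = [len(test) for _, test in KFold(10, n_folds=3)]
    assert sizes == [4, 3, 3]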
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
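# Illustrative sketch (not part of the original module): every test fold of a
# StratifiedKFold preserves the class proportions of y, so a balanced 4/4
# target yields two samples per class in each of the two test folds.
def _demo_stratified_balance():
    y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    for _, test in StratifiedKFold(y, n_folds=2):
        assert (y[test] == 0).sum() == 2 and (y[test] == 1).sum() == 2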
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same label.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
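# Illustrative sketch (not part of the original module): __len__ above is the
# binomial coefficient C(n_unique_labels, p). With 4 distinct labels and
# p=2 that is 4! / (2! * 2!) = 6 train/test splits.
def _demo_lplo_len():
    assert len(LeavePLabelOut([1, 1, 2, 3, 4], p=2)) == 6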
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
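# Illustrative sketch (not part of the original module), a worked example of
# the rounding rules above: a float test_size is rounded up with ceil and,
# when train_size is None, the training set takes the complement:
#   _validate_shuffle_split(10, 0.25, None) -> n_test = ceil(2.5) = 3,
#   n_train = 10 - 3 = 7
def _demo_validate_shuffle_split():
    assert _validate_shuffle_split(10, 0.25, None) == (7, 3)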
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# divisors of the number of elements per class), we may end
# up here with fewer samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
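# Illustrative sketch (not part of the original module): _index_param_value
# only slices fit parameters that look like per-sample arrays; scalars and
# length-mismatched values pass through untouched.
def _demo_index_param_value():
    X = np.zeros((4, 2))
    w = np.array([1., 2., 3., 4.])
    assert np.array_equal(_index_param_value(X, w, [0, 2]), w[[0, 2]])
    assert _index_param_value(X, 5, [0, 2]) == 5  # scalar passes through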
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example, a list or an array with at least two dimensions.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
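# Illustrative sketch (not part of the original module): _check_is_partition
# accepts any reordering of arange(n) and rejects index sets with gaps or
# duplicates, which is what cross_val_predict above relies on.
def _demo_check_is_partition():
    assert _check_is_partition(np.array([2, 0, 1]), 3)  # a permutation
    assert not _check_is_partition(np.array([0, 0, 1]), 3)  # duplicate index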
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example, a list or an array with at least two dimensions.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
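# Illustrative sketch (not part of the original module): with labels given,
# _shuffle permutes y only within each label group, so values never cross
# group boundaries.
def _demo_shuffle_within_labels():
    rng = check_random_state(0)
    y = np.array([1, 2, 3, 4])
    labels = np.array([0, 0, 1, 1])
    y_perm = _shuffle(y, labels, rng)
    assert set(y_perm[:2]) == {1, 2} and set(y_perm[2:]) == {3, 4}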
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3-fold KFold is used, or another object that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier)
def _check_cv(cv, X=None, y=None, classifier=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
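# Illustrative sketch (not part of the original module) of the dispatch rule
# above: for a classifier with a binary or multiclass y an integer cv
# becomes a StratifiedKFold, otherwise a plain KFold over the samples.
def _demo_check_cv_dispatch():
    X = np.zeros((6, 2))
    y = np.array([0, 0, 0, 1, 1, 1])
    assert isinstance(_check_cv(3, X, y, classifier=True), StratifiedKFold)
    assert isinstance(_check_cv(3, X, y, classifier=False), KFold)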
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False # to avoid a problem with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a problem with nosetests
| bsd-3-clause |
surgebiswas/poker | PokerBots_2017/Johnny/numpy/core/fromnumeric.py | 22 | 98126 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = type(None)
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
try:
return getattr(obj, method)(*args, **kwds)
# An AttributeError occurs if the object does not have
# such a method in its class.
# A TypeError occurs if the object does have such a method
# in its class, but its signature is not identical to that
# of NumPy's. This situation has occurred in the case of
# a downstream library like 'pandas'.
except (AttributeError, TypeError):
return _wrapit(obj, method, *args, **kwds)
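# A small sketch, not part of the original module, of the dispatch pattern
# above: the object's own method wins, otherwise _wrapit converts the input
# with asarray and retries. The class _DuckArray is hypothetical.
class _DuckArray(object):
    def take(self, indices, axis=None, out=None, mode='raise'):
        return 'duck take'
# _wrapfunc(_DuckArray(), 'take', [0])  -> 'duck take'  (method found)
# _wrapfunc([4, 3, 5], 'take', [0])     -> array([4])   (fallback via _wrapit)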
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
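# A naive sketch, not the actual implementation, of the mode='raise'
# semantics spelled out above, assuming `a` and every choice already share
# a common shape (no broadcasting). The name _naive_choose is hypothetical.
def _naive_choose(a, choices):
    a = asarray(a)
    choices = [asarray(c) for c in choices]
    out = np.empty(a.shape, dtype=choices[0].dtype)
    for idx in np.ndindex(a.shape):         # every position in Ba
        out[idx] = choices[a[idx]][idx]     # element of the a[idx]-th choice
    return out
# _naive_choose([2, 3, 1, 0], choices) matches np.choose([2, 3, 1, 0],
# choices) for the 4 x 4 `choices` used in the Examples above.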
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__))
return put(ind, v, mode=mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _wrapfunc(a, 'transpose', axes)
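# An illustrative helper, not part of the original module, showing the
# Notes above in action: argsort(axes) is the inverse permutation, so it
# undoes a transposition. The name _untranspose is hypothetical.
def _untranspose(x, axes):
    return transpose(x, np.argsort(axes))
# _untranspose(np.transpose(x, (1, 2, 0)), (1, 2, 0)) restores x's shape.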
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th values, it will partition all elements
indexed by them into their sorted positions at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
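# An illustrative check, not part of the original source, of the guarantee
# documented above: position kth holds the value a full sort would place
# there, smaller values land before it, equal-or-greater values after it.
def _is_partitioned(p, kth):
    return bool((p[:kth] <= p[kth]).all() and (p[kth:] >= p[kth]).all())
# _is_partitioned(np.partition([7, 1, 5, 3, 9], 2), 2)  -> True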
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th values, it will partition all of them into their
sorted positions at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
In other words, ``a[index_array]`` yields a partitioned `a`.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Prior to numpy 1.4.0, sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to an introsort which will switch
to heapsort when it does not make enough progress. This makes its
worst case O(n*log(n)).
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
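# A common idiom, added here for illustration (it is not in the original
# docstring): argsort of a permutation is its inverse, so applying argsort
# twice yields the rank of each element. The name _ranks is hypothetical.
def _ranks(x):
    return argsort(argsort(x))
# _ranks(np.array([30, 10, 20]))  -> array([2, 0, 1])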
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
>>> b = np.arange(6)
>>> b[4] = 0
>>> b
array([0, 1, 2, 3, 0, 5])
>>> np.argmin(b) # Only the first occurrence is returned.
0
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
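# An illustrative helper, not part of NumPy: searchsorted returns the
# insertion point that keeps a sorted 1-D array sorted, so splicing at that
# index preserves order. The name _sorted_insert is hypothetical.
def _sorted_insert(a, v):
    i = searchsorted(a, v)
    return concatenate((a[:i], asarray([v], dtype=a.dtype), a[i:]))
# _sorted_insert(np.array([1, 2, 4, 5]), 3)  -> array([1, 2, 3, 4, 5])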
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na:
return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate((a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
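# A behavioural contrast, added for illustration (not from the original
# docstring): np.resize repeats the data to fill the new shape, while the
# in-place ndarray.resize pads with zeros instead.
#
#     a = np.array([0, 1, 2, 3])
#     np.resize(a, (6,))     # array([0, 1, 2, 3, 0, 1])  - repeated copies
#     b = a.copy()
#     b.resize((6,))         # b becomes array([0, 1, 2, 3, 0, 0]) - zeros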
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=(2,)).shape
(1, 3)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
try:
# First try to use the new axis= parameter
return squeeze(axis=axis)
except TypeError:
# For backwards compatibility
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D and not a matrix, a 1-D array of the same type as `a`
containing the diagonal is returned. If `a` is a matrix, a 1-D
array containing the diagonal is returned in order to maintain
backward compatibility. If the dimension of `a` is greater than
two, then an array of diagonals is returned, "packed" from
left-most dimension to right-most (e.g., if `a` is 3-D, then the
diagonals are "packed" along rows).
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
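# An illustrative helper, not part of the original module, following the
# compatibility advice above: take an explicit, writable copy of the
# diagonal so the code behaves identically across past and future NumPy
# versions. The name _writable_diagonal is hypothetical.
def _writable_diagonal(a, offset=0, axis1=0, axis2=1):
    return diagonal(a, offset=offset, axis1=axis1, axis2=axis2).copy()
# d = _writable_diagonal(np.arange(4).reshape(2, 2)); d[0] = 99  # safe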
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
the same subtype as `a`. The shape of the returned array is
``(a.size,)``. Matrices are special cased for backward
compatibility.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1 2 3 4 5 6]
>>> print(x.reshape(-1))
[1 2 3 4 5 6]
>>> print(np.ravel(x, order='F'))
[1 4 2 5 3 6]
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> print(np.ravel(x.T))
[1 4 2 5 3 6]
>>> print(np.ravel(x.T, order='A'))
[1 2 3 4 5 6]
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order. The corresponding non-zero
values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
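# An equivalence note, added for illustration (not from the original
# docstring): along an axis, compress with a boolean condition matches
# boolean fancy indexing on that axis.
#
#     a = np.array([[1, 2], [3, 4], [5, 6]])
#     cond = np.array([False, True, True])
#     np.compress(cond, a, axis=0)    # same result as a[cond]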
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
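# An illustrative equivalent, not the actual implementation: for scalar
# bounds, clipping is the composition of elementwise maximum and minimum.
# The name _clip_by_hand is hypothetical.
def _clip_by_hand(a, a_min, a_max):
    return um.minimum(um.maximum(a, a_min), a_max)
# (_clip_by_hand(np.arange(10), 1, 8) == np.clip(np.arange(10), 1, 8)).all()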
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class's `sum` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
if type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
pass
else:
return sum(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._sum(a, axis=axis, dtype=dtype,
out=out, **kwargs)
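# An illustrative use of keepdims, not part of the original source: keeping
# the reduced axis as size one lets the row sums broadcast back against the
# input. The name _row_normalize is hypothetical; `sum` here is this
# module's function defined above, not the Python builtin.
def _row_normalize(a):
    return a / sum(a, axis=1, keepdims=True)
# _row_normalize(np.arange(6.).reshape(2, 3))  - each row then sums to 1.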
def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs)
def sometrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def alltrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class's `any` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class's `all` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
return _wrapfunc(a, 'ptp', axis=axis, out=out)
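# An illustrative equivalent, not the actual implementation: peak-to-peak
# is simply the maximum minus the minimum along the chosen axis. The name
# _ptp_by_hand is hypothetical; amax and amin are defined just below and
# resolve at call time.
def _ptp_by_hand(a, axis=None):
    return amax(a, axis=axis) - amin(a, axis=axis)
# (_ptp_by_hand(np.arange(4).reshape(2, 2), axis=0)
#  == np.ptp(np.arange(4).reshape(2, 2), axis=0)).all()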
def amax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `amax` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
pass
else:
return amax(axis=axis, out=out, **kwargs)
return _methods._amax(a, axis=axis,
out=out, **kwargs)
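# A doctest-style sketch of the `keepdims` behaviour described above: the
# size-one reduced axis lets the row maxima broadcast back against the input.
# >>> a = np.arange(6, dtype=float).reshape(2, 3)
# >>> a / np.amax(a, axis=1, keepdims=True)
# array([[ 0. ,  0.5,  1. ],
#        [ 0.6,  0.8,  1. ]])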
def amin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `amin` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
pass
else:
return amin(axis=axis, out=out, **kwargs)
return _methods._amin(a, axis=axis,
out=out, **kwargs)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `prod` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
pass
else:
return prod(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._prod(a, axis=axis, dtype=dtype,
out=out, **kwargs)
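# A doctest-style sketch of the tuple-of-axes form noted above (added in
# 1.7.0): reducing a (2, 3, 4) array over axes 0 and 2 multiplies
# 2 * 4 = 8 elements per output entry.
# >>> np.prod(np.full((2, 3, 4), 2.0), axis=(0, 2))
# array([ 256.,  256.,  256.])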
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
.. note::
This function is deprecated in NumPy 1.9 to avoid confusion with
`numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
should be used instead.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in NumPy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
# 2014-04-12, 1.9
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
VisibleDeprecationWarning, stacklevel=2)
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
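# A doctest-style sketch of the representation caveat above: 2.675 is stored
# as roughly 2.67499999..., so rounding to two decimals gives 2.67 rather
# than the 2.68 that half-even rounding of an exact 2.675 would give.
# >>> np.around([2.675], decimals=2)
# array([ 2.67])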
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
return around(a, decimals=decimals, out=out)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `mean` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
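# A doctest-style sketch combining `axis` and `keepdims` as described above:
# subtracting a keepdims-reduced mean broadcasts cleanly, centering columns.
# >>> a = np.array([[1., 2.], [3., 4.]])
# >>> a - np.mean(a, axis=0, keepdims=True)
# array([[-1., -1.],
#        [ 1.,  1.]])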
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `std` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
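# A doctest-style sketch of the `ddof` discussion above: the default divisor
# is N (maximum likelihood), while ddof=1 gives the usual sample estimate.
# >>> x = np.array([1., 2., 3., 4.])
# >>> np.std(x)             # divisor N = 4
# 1.1180339887498949
# >>> np.std(x, ddof=1)     # divisor N - 1 = 3
# 1.2909944487358056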
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float32`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `var` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std , mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([ 1., 1.])
>>> np.var(a, axis=1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
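# A doctest-style sketch of the complex-input note above: absolute values are
# squared, so the variance of complex data is real and nonnegative.
# >>> np.var([1j, 1])
# 0.5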
| mit |
sampadsaha5/sympy | sympy/holonomic/holonomic.py | 3 | 90995 | """Holonomic Functions and Differential Operators"""
from __future__ import print_function, division
from sympy import (symbols, Symbol, diff, S, Dummy, Order, rf, meijerint, I,
solve, limit, Float, nsimplify, gamma)
from sympy.printing import sstr
from sympy.core.compatibility import range
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.core.sympify import sympify
from sympy.simplify.hyperexpand import hyperexpand
from sympy.functions.special.hyper import hyper, meijerg
from sympy.core.numbers import NaN, Infinity, NegativeInfinity
from sympy.matrices import Matrix
from sympy.functions.elementary.exponential import exp_polar, exp
from .linearsolver import NewMatrix
from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
from .holonomicerrors import NotPowerSeriesError, NotHyperSeriesError, SingularityError, NotHolonomicError
from sympy.polys.rings import PolyElement
from sympy.polys.fields import FracElement
from sympy.polys.domains import QQ, ZZ, RR
from sympy.polys.domains.pythonrational import PythonRational
from sympy.polys.polyclasses import DMF
from sympy.polys.polyroots import roots
def DifferentialOperators(base, generator):
"""
Returns an Algebra of Differential Operators and the operator for
    differentiation, i.e. the `Dx` operator.
The first argument needs to be the base polynomial ring for the algebra
and the second argument must be a generator which can be either a
noncommutative Symbol or a string.
Examples
    ========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
"""
ring = DifferentialOperatorAlgebra(base, generator)
return (ring, ring.derivative_operator)
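# A doctest-style sketch (reusing `R, Dx` and `x` from the example above) of
# the commutation rule stated in the class below, Dx*a = a*Dx + a' for the
# standard derivation:
# >>> Dx*x
# (1) + (x)Dx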
class DifferentialOperatorAlgebra(object):
"""
An Ore Algebra is a set of noncommutative polynomials in the
    indeterminate `Dx` and coefficients in a base ring A. It follows the
commutation rule:
Dx * a = sigma(a) * Dx + delta(a)
Where sigma: A --> A is an endomorphism and delta: A --> A is a
skew-derivation i.e. delta(ab) = delta(a) * b + sigma(a) * delta(b)
If one takes the sigma as identity map and delta as the standard derivation
then it becomes the algebra of Differential Operators also called
a Weyl Algebra i.e. an algebra whose elements are Differential Operators.
This class represents a Weyl Algebra and serves as the parent ring for
Differential Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
    Univariate Differential Operator Algebra in indeterminate Dx over the base ring
ZZ[x]
See Also
========
DifferentialOperator
"""
def __init__(self, base, generator):
# the base polynomial ring for the algebra
self.base = base
# the operator representing differentiation i.e. `Dx`
self.derivative_operator = DifferentialOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = symbols('Dx', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = symbols(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
        string = 'Univariate Differential Operator Algebra in indeterminate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
    def __eq__(self, other):
        return (self.base == other.base and
            self.gen_symbol == other.gen_symbol)
class DifferentialOperator(object):
"""
    Differential Operators are elements of the Weyl Algebra. The Operators
are defined by a list of polynomials in the base ring and the
parent ring of the Operator.
Takes a list of polynomials for each power of Dx and the
parent ring which must be an instance of DifferentialOperatorAlgebra.
A Differential Operator can be created easily using
the operator `Dx`. See examples below.
Examples
========
>>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> DifferentialOperator([0, 1, x**2], R)
(1)Dx + (x**2)Dx**2
>>> (x*Dx*x + 1 - Dx**2)**2
(2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)Dx + (x**4 - 6*x - 2)Dx**2 + (-2*x**2)Dx**3 + (1)Dx**4
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
# the parent ring for this operator
# must be an DifferentialOperatorAlgebra object
self.parent = parent
base = self.parent.base
self.x = base.gens[0] if isinstance(base.gens[0], Symbol) else base.gens[0][0]
# sequence of polynomials in x for each power of Dx
# the list should not have trailing zeroes
# represents the operator
# convert the expressions into ring elements using from_sympy
if isinstance(list_of_poly, list):
for i, j in enumerate(list_of_poly):
if not isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(sympify(j))
elif isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(base.to_sympy(j))
self.listofpoly = list_of_poly
# highest power of `Dx`
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two DifferentialOperator and returns another
DifferentialOperator instance using the commutation rule
Dx*a = a*Dx + a'
"""
listofself = self.listofpoly
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiplies a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Dx^i * b
def _mul_Dxi_b(b):
sol1 = [self.parent.base.zero]
sol2 = []
if isinstance(b, list):
for i in b:
sol1.append(i)
sol2.append(i.diff())
else:
sol1.append(self.parent.base.from_sympy(b))
sol2.append(self.parent.base.from_sympy(b).diff())
return _add_lists(sol1, sol2)
for i in range(1, len(listofself)):
# find Dx^i * b in ith iteration
listofother = _mul_Dxi_b(listofother)
# solution = solution + listofself[i] * (Dx^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return DifferentialOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(sympify(other))
sol = []
for j in self.listofpoly:
sol.append(other * j)
return DifferentialOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, DifferentialOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return DifferentialOperator(sol, self.parent)
else:
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(sympify(other))]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return DifferentialOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return DifferentialOperator([self.parent.base.one], self.parent)
# if self is `Dx`
if self.listofpoly == self.parent.derivative_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return DifferentialOperator(sol, self.parent)
# the general case
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')Dx'
continue
print_str += '(' + sstr(j) + ')' + 'Dx**' + sstr(i)
return print_str
__repr__ = __str__
    def __eq__(self, other):
        if isinstance(other, DifferentialOperator):
            return (self.listofpoly == other.listofpoly and
                self.parent == other.parent)
        else:
            if self.listofpoly[0] == other:
                for i in self.listofpoly[1:]:
                    if i != self.parent.base.zero:
                        return False
                return True
            else:
                return False
def is_singular(self, x0):
"""
Checks if the differential equation is singular at x0.
"""
base = self.parent.base
return x0 in roots(base.to_sympy(self.listofpoly[-1]), self.x)
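# A doctest-style sketch of `is_singular` (reusing `R, Dx` and `x` from the
# examples above): the leading coefficient x vanishes at 0, so the operator
# is singular there but not at 1.
# >>> L = x*Dx + 1
# >>> L.is_singular(0), L.is_singular(1)
# (True, False)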
class HolonomicFunction(object):
"""
A Holonomic Function is a solution to a linear homogeneous ordinary
differential equation with polynomial coefficients. This differential
equation can also be represented by an annihilator i.e. a Differential
Operator L such that L.f = 0. For uniqueness of these functions,
initial conditions can also be provided along with the annihilator.
Holonomic functions have closure properties and thus forms a ring.
Given two Holonomic Functions f and g, their sum, product,
integral and derivative is also a Holonomic Function.
For ordinary points initial condition should be a vector of values of
the derivatives i.e. [y(x0), y'(x0), y''(x0) ...].
For regular singular points initial conditions can also be provided in this
format:
{s0: [C_0, C_1, ...], s1: [C0_0, C0_1, ...], ...}
where s0, s1, ... are the roots of indicial equation and vectors
    [C_0, C_1, ...], [C0_0, C0_1, ...], ... are the corresponding initial
terms of the associated power series. See Examples below.
To plot a Holonomic Function, one can use `.evalf()` for numerical
computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
``
import sympy.holonomic
from sympy import var, sin
import matplotlib.pyplot as plt
import numpy as np
var("x")
r = np.linspace(1, 5, 100)
y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r)
plt.plot(r, y, label="holonomic function")
plt.show()
``
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
>>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
>>> p + q # annihilator of e^x + sin(x)
HolonomicFunction((-1) + (1)Dx + (-1)Dx**2 + (1)Dx**3, x), f(0) = 1, f'(0) = 2, f''(0) = 1
>>> p * q # annihilator of e^x * sin(x)
HolonomicFunction((2) + (-2)Dx + (1)Dx**2, x), f(0) = 0, f'(0) = 1
# an example of initial conditions for regular singular points
# only one root `1/2` of the indicial equation. So ics is [(1/2, [1])]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
HolonomicFunction((-1/2) + (x)Dx, x), {1/2: [1]}
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
sqrt(x)
"""
_op_priority = 20
def __init__(self, annihilator, x, x0=0, y0=None):
"""
Takes the annihilator and variable of the function.
`x0` is the point for which initial conditions are given and
`y0` is the initial condition.
For ordinary points `y0` should be a vector of initial values
y0 = [f(x0), f'(x0), f''(x0) ...].
To make the function unique, length of the vector `y0` must be equal to or
greater than the order of differential equation.
"""
# initial condition
self.y0 = y0
        # the point for initial conditions, default is zero.
self.x0 = x0
# differential operator L such that L.f = 0
self.annihilator = annihilator
self.x = x
def __repr__(self):
str_sol = 'HolonomicFunction(%s, %s)' % ((self.annihilator).__repr__(), sstr(self.x))
if not self._have_init_cond():
return str_sol
# printing the singular initial condition
# in valid python
elif self.is_singularics():
str_sol += ', ' + sstr(self.y0)
return str_sol
# for ordinary initial conditions
else:
cond_str = ''
diff_str = ''
for i in self.y0:
cond_str += ', f%s(%s) = %s' % (diff_str, sstr(self.x0), sstr(i))
diff_str += "'"
sol = str_sol + cond_str
return sol
__str__ = __repr__
def unify(self, other):
"""
Unifies the ground domain of a given two Holonomic
Functions.
"""
R1 = self.annihilator.parent.base
R2 = other.annihilator.parent.base
dom1 = R1.dom
dom2 = R2.dom
if R1 == R2:
return (self, other)
R = (dom1.unify(dom2)).old_poly_ring(self.x)
newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))
sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]
sol1 = DifferentialOperator(sol1, newparent)
sol2 = DifferentialOperator(sol2, newparent)
sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)
return (sol1, sol2)
def is_singularics(self):
"""
        Returns True if the function has a singular initial condition
        in the dictionary format.
        Returns False if the function has an ordinary initial condition
        in the list format.
Returns None for all other cases.
"""
if isinstance(self.y0, dict):
return True
elif isinstance(self.y0, list):
return False
def _have_init_cond(self):
"""
        Checks if the function has an initial condition.
"""
return bool(self.y0)
def _singularics_to_ord(self):
"""
Converts a singular initial condition to ordinary if possible.
"""
a = list(self.y0)[0]
b = self.y0[a]
if len(self.y0) == 1 and a == int(a) and a > 0:
y0 = []
a = int(a)
for i in range(a):
y0.append(S(0))
y0 += [j * factorial(a + i) for i, j in enumerate(b)]
return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
def __add__(self, other):
# if the ground domains are different
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a + b
deg1 = self.annihilator.order
deg2 = other.annihilator.order
dim = max(deg1, deg2)
R = self.annihilator.parent.base
K = R.get_field()
rowsself = [self.annihilator]
rowsother = [other.annihilator]
gen = self.annihilator.parent.derivative_operator
# constructing annihilators up to order dim
for i in range(dim - deg1):
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
for i in range(dim - deg2):
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
# constructing the matrix of the ansatz
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(0)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S(0) for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
        # solving the linear system using the Gauss-Jordan solver
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
        # if a solution is not obtained, increase the order by 1 in each
        # iteration
while sol.is_zero:
dim += 1
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(S(0))
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S(0) for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
        # taking only the coefficients needed to multiply with `self`;
        # this can also be done the other way, by taking the R.H.S. and
        # multiplying with `other`
sol = sol[:dim + 1 - deg1]
sol1 = _normalize(sol, self.annihilator.parent)
# annihilator of the solution
sol = sol1 * (self.annihilator)
sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol, self.x)
# both the functions have ordinary initial conditions
if self.is_singularics() == False and other.is_singularics() == False:
# directly add the corresponding value
if self.x0 == other.x0:
                # try to extend the initial conditions
# using the annihilator
y1 = _extend_y0(self, sol.order)
y2 = _extend_y0(other, sol.order)
y0 = [a + b for a, b in zip(y1, y2)]
return HolonomicFunction(sol, self.x, self.x0, y0)
else:
                # change the initial conditions to the same point
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self + other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) + other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self + other.change_ics(self.x0)
else:
return self.change_ics(other.x0) + other
if self.x0 != other.x0:
return HolonomicFunction(sol, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S(0):_y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S(0):_y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
# computing singular initial condition for the result
# taking union of the series terms of both functions
y0 = {}
for i in y1:
# add corresponding initial terms if the power
# on `x` is same
if i in y2:
y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
else:
y0[i] = y1[i]
for i in y2:
if not i in y1:
y0[i] = y2[i]
return HolonomicFunction(sol, self.x, self.x0, y0)
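    # A doctest-style sketch of the closure under addition implemented above,
    # reusing the setup from the class docstring (printed form may vary):
    # >>> p = HolonomicFunction(Dx - 1, x, 0, [1])        # e^x
    # >>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])  # sin(x)
    # >>> (p + q).to_expr()
    # exp(x) + sin(x)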
def integrate(self, limits, initcond=False):
"""
        Integrate the given holonomic function. Limits can be provided;
        initial conditions can only be computed when the limits are (x0, x).
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
HolonomicFunction((-1)Dx + (1)Dx**2, x), f(0) = 0, f'(0) = 1
        # integrate(cos(x), (x, 0, x)) = sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
HolonomicFunction((1)Dx + (1)Dx**3, x), f(0) = 0, f'(0) = 1, f''(0) = 0
"""
# to get the annihilator, just multiply by Dx from right
D = self.annihilator.parent.derivative_operator
# if the function have initial conditions of the series format
if self.is_singularics() == True:
r = self._singularics_to_ord()
if r:
return r.integrate(limits, initcond=initcond)
# computing singular initial condition for the function
# produced after integration.
y0 = {}
for i in self.y0:
c = self.y0[i]
c2 = []
for j in range(len(c)):
if c[j] == 0:
c2.append(S(0))
# if power on `x` is -1, the integration becomes log(x)
# TODO: Implement this case
elif i + j + 1 == 0:
raise NotImplementedError("logarithmic terms in the series are not supported")
else:
c2.append(c[j] / S(i + j + 1))
y0[i + 1] = c2
if hasattr(limits, "__iter__"):
raise NotImplementedError("Definite integration for singular initial conditions")
return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
# if no initial conditions are available for the function
if not self._have_init_cond():
if initcond:
return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S(0)])
return HolonomicFunction(self.annihilator * D, self.x)
# definite integral
# initial conditions for the answer will be stored at point `a`,
# where `a` is the lower limit of the integrand
if hasattr(limits, "__iter__"):
if len(limits) == 3 and limits[0] == self.x:
x0 = self.x0
a = limits[1]
b = limits[2]
else:
x0 = self.x0
a = self.x0
b = self.x
if x0 == a:
y0 = [S(0)]
y0 += self.y0
# use evalf to get the values at `a`
else:
y0 = [S(0)]
tempy0 = self.change_ics(a).y0
y0 += tempy0
# if the upper limit is `x`, the answer will be a function
if b == self.x:
return HolonomicFunction(self.annihilator * D, self.x, a, y0)
            # if the upper limit is a Number, a numerical value will be returned
elif S(b).is_Number:
try:
s = HolonomicFunction(self.annihilator * D, self.x, a,\
y0).to_expr()
indefinite = s.subs(self.x, b)
if not isinstance(indefinite, NaN):
return indefinite
else:
return s.limit(self.x, b)
except (NotHyperSeriesError, NotPowerSeriesError):
return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)
return HolonomicFunction(self.annihilator * D, self.x)
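    # A doctest-style sketch of the numeric-upper-limit branch above, again
    # with the docstring's setup (printed form may vary): integrating e^x
    # over (0, 1) evaluates the antiderivative symbolically.
    # >>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, 1))
    # -1 + E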
def diff(self, *args):
"""
Differentiation of the given Holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
# derivative of sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
cos(x)
        # derivative of e^(2*x)
>>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
2*exp(2*x)
See Also
        ========
.integrate()
"""
if args:
if args[0] != self.x:
return S(0)
elif len(args) == 2:
sol = self
for i in range(args[1]):
sol = sol.diff(args[0])
return sol
ann = self.annihilator
dx = ann.parent.derivative_operator
# if the function is constant.
if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
return S(0)
# if the coefficient of y in the differential equation is zero.
        # a shift is applied to compute the answer in this case.
elif ann.listofpoly[0] == ann.parent.base.zero:
sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)
if self._have_init_cond():
# if ordinary initial condition
if self.is_singularics() == False:
return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
# TODO: support for singular initial condition
return HolonomicFunction(sol, self.x)
else:
return HolonomicFunction(sol, self.x)
# the general algorithm
R = ann.parent.base
K = R.get_field()
seq_dmf = [K.new(i.rep) for i in ann.listofpoly]
        # -y = (a1/a0)*y' + (a2/a0)*y'' + ... + (an/a0)*y^(n)
rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
rhs.insert(0, K.zero)
# differentiate both lhs and rhs
sol = _derivate_diff_eq(rhs)
# add the term y' in lhs to rhs
sol = _add_lists(sol, [K.zero, K.one])
sol = _normalize(sol[1:], self.annihilator.parent, negative=False)
if not self._have_init_cond() or self.is_singularics() == True:
return HolonomicFunction(sol, self.x)
y0 = _extend_y0(self, sol.order + 1)[1:]
return HolonomicFunction(sol, self.x, self.x0, y0)
    def __eq__(self, other):
        if self.annihilator != other.annihilator or self.x != other.x:
            return False
        if self._have_init_cond() and other._have_init_cond():
            return self.x0 == other.x0 and self.y0 == other.y0
        return True
def __mul__(self, other):
ann_self = self.annihilator
if not isinstance(other, HolonomicFunction):
other = sympify(other)
if not other.is_constant():
                raise NotImplementedError("Can't multiply a HolonomicFunction by a non-constant expression or function.")
if not self._have_init_cond():
return self
else:
y0 = _extend_y0(self, ann_self.order)
y1 = []
for j in y0:
y1.append(j * other)
return HolonomicFunction(ann_self, self.x, self.x0, y1)
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a * b
ann_other = other.annihilator
list_self = []
list_other = []
a = ann_self.order
b = ann_other.order
R = ann_self.parent.base
K = R.get_field()
for j in ann_self.listofpoly:
list_self.append(K.new(j.rep))
for j in ann_other.listofpoly:
list_other.append(K.new(j.rep))
# will be used to reduce the degree
self_red = [-list_self[i] / list_self[a] for i in range(a)]
other_red = [-list_other[i] / list_other[b] for i in range(b)]
        # coeff_mul[i][j] is the coefficient of Dx^i(f)*Dx^j(g)
coeff_mul = [[S(0) for i in range(b + 1)] for j in range(a + 1)]
coeff_mul[0][0] = S(1)
# making the ansatz
lin_sys = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
homo_sys = [[S(0) for q in range(a * b)]]
homo_sys = NewMatrix(homo_sys).transpose()
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
        # until a non-trivial solution is found
while sol[0].is_zero:
            # updating the coefficients of Dx^i(f)*Dx^j(g) for the next degree
for i in range(a - 1, -1, -1):
for j in range(b - 1, -1, -1):
coeff_mul[i][j + 1] += coeff_mul[i][j]
coeff_mul[i + 1][j] += coeff_mul[i][j]
if isinstance(coeff_mul[i][j], K.dtype):
coeff_mul[i][j] = DMFdiff(coeff_mul[i][j])
else:
coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
# reduce the terms to lower power using annihilators of f, g
for i in range(a + 1):
if not coeff_mul[i][b] == S(0):
for j in range(b):
coeff_mul[i][j] += other_red[j] * \
coeff_mul[i][b]
coeff_mul[i][b] = S(0)
            # j stops at b - 1; column b was already reduced in the previous loop
for j in range(b):
if not coeff_mul[a][j] == 0:
for i in range(a):
coeff_mul[i][j] += self_red[i] * \
coeff_mul[a][j]
coeff_mul[a][j] = S(0)
lin_sys.append([coeff_mul[i][j] for i in range(a)
for j in range(b)])
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
sol_ann = _normalize(sol[0][0:], self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol_ann, self.x)
if self.is_singularics() == False and other.is_singularics() == False:
# if both the conditions are at same point
if self.x0 == other.x0:
                # try to find more initial conditions
y0_self = _extend_y0(self, sol_ann.order)
y0_other = _extend_y0(other, sol_ann.order)
# h(x0) = f(x0) * g(x0)
y0 = [y0_self[0] * y0_other[0]]
                # Leibniz rule: Dx^i(f*g) = sum over j of binomial(i, j) * Dx^j(f) * Dx^(i-j)(g)
for i in range(1, min(len(y0_self), len(y0_other))):
coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
for j in range(i + 1):
for k in range(i + 1):
if j + k == i:
coeff[j][k] = binomial(i, j)
sol = 0
for j in range(i + 1):
for k in range(i + 1):
sol += coeff[j][k]* y0_self[j] * y0_other[k]
y0.append(sol)
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
            # if the initial conditions are at different points, shift them to a common point
else:
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self * other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) * other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self * other.change_ics(self.x0)
else:
return self.change_ics(other.x0) * other
if self.x0 != other.x0:
return HolonomicFunction(sol_ann, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S(0):_y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S(0):_y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
y0 = {}
# multiply every possible pair of the series terms
for i in y1:
for j in y2:
k = min(len(y1[i]), len(y2[j]))
c = []
for a in range(k):
s = S(0)
for b in range(a + 1):
s += y1[i][b] * y2[j][a - b]
c.append(s)
if not i + j in y0:
y0[i + j] = c
else:
y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
__rmul__ = __mul__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n < 0:
raise NotHolonomicError("Negative Power on a Holonomic Function")
if n == 0:
return S(1)
if n == 1:
return self
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def degree(self):
"""
Returns the highest power of `x` in the annihilator.
"""
sol = [i.degree() for i in self.annihilator.listofpoly]
return max(sol)
def composition(self, expr, *args, **kwargs):
"""
Returns the annihilator after composition of a holonomic function with
an algebraic function. Initial conditions for the annihilator after
        composition can also be provided to the function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
HolonomicFunction((-2*x) + (1)Dx, x), f(0) = 1
>>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
HolonomicFunction((4*x**3) + (-1)Dx + (x)Dx**2, x), f(1) = 1, f'(1) = 0
See Also
========
from_hyper
"""
R = self.annihilator.parent
a = self.annihilator.order
diff = expr.diff(self.x)
listofpoly = self.annihilator.listofpoly
for i, j in enumerate(listofpoly):
if isinstance(j, self.annihilator.parent.base.dtype):
listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
r = listofpoly[a].subs({self.x:expr})
        subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range(a)]
coeffs = [S(0) for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
coeffs[0] = S(1)
system = [coeffs]
homogeneous = Matrix([[S(0) for i in range(a)]]).transpose()
sol = S(0)
while sol.is_zero:
coeffs_next = [p.diff(self.x) for p in coeffs]
for i in range(a - 1):
coeffs_next[i + 1] += (coeffs[i] * diff)
for i in range(a):
coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
coeffs = coeffs_next
# check for linear relations
system.append(coeffs)
sol_tuple = (Matrix(system).transpose()).gauss_jordan_solve(homogeneous)
sol = sol_tuple[0]
tau = sol.atoms(Dummy).pop()
sol = sol.subs(tau, 1)
sol = _normalize(sol[0:], R, negative=False)
# if initial conditions are given for the resulting function
if args:
return HolonomicFunction(sol, self.x, args[0], args[1])
return HolonomicFunction(sol, self.x)
def to_sequence(self, lb=True):
"""
        Finds the recurrence relation in the power series expansion
of the function about `x0`, where `x0` is the point at which
initial conditions are given.
        If the point `x0` is ordinary, a solution of the form [(R, n0)]
        is returned, where `R` is the recurrence relation and `n0` is the
smallest `n` for which the recurrence holds true.
If the point `x0` is regular singular, a vector of `(R, p, n0)` is
returned, i.e. [(R, p, n0), ...]. Each tuple in this vector represents
a recurrence relation `R` associated with a root of the indicial
equation `p`. Conditions of a different format can also be provided in
this case, see the docstring of the class.
If it's not possible to numerically compute a initial condition,
it is returned as a symbol C_j, denoting the coefficient of (x - x0)^j
in the power series about x0.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> # exp(x), the recurrence relation holds for n >= 0
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
[(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
>>> # log(1 + x), the recurrence relation holds for n >= 2
>>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
[(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
[(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
See Also
========
HolonomicFunction.series
References
==========
[1] hal.inria.fr/inria-00070025/document
[2] http://www.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
"""
if self.x0 != 0:
return self.shift_x(self.x0).to_sequence()
# check whether a power series exists if the point is singular
if self.annihilator.is_singular(self.x0):
return self._frobenius(lb=lb)
dict1 = {}
n = symbols('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
# substituting each term of the form `x^k Dx^j` in the
# annihilator, according to the formula below:
# x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
# for explanation see [2].
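# e.g. for k = j = 1 the formula gives x*Dx -> Sum(n * a(n) * x^n), the familiar n*a(n) term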
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k) in dict1:
dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
else:
dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = self.degree()
# the recurrence relation holds for all values of
# n greater than smallest_n, i.e. n >= smallest_n
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
# an appropriate shift of the recurrence
for j in range(lower, upper + 1):
if j in keylist:
temp = S(0)
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S(0))
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
y0 = _extend_y0(self, order)
u0 = []
# u(n) = y^n(0)/factorial(n)
for i, j in enumerate(y0):
u0.append(j / factorial(i))
# if sufficient conditions can't be computed then
# try to use the series method i.e.
# equate the coefficients of x^k in the equation formed by
# substituting the series in differential equation, to zero.
if len(u0) < order:
for i in range(degree):
eq = S(0)
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S(0)
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif i + j[0] not in dummys:
dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
def _frobenius(self, lb=True):
# compute the roots of indicial equation
indicialroots = self._indicial()
reals = []
compl = []
for i in indicialroots:
if i.is_real:
reals.extend([i] * indicialroots[i])
else:
a, b = i.as_real_imag()
compl.extend([(i, a, b)] * indicialroots[i])
# sort the roots for a fixed ordering of solution
compl.sort(key=lambda x : x[1])
compl.sort(key=lambda x : x[2])
reals.sort()
x = self.x
# group the roots; roots that differ by an integer are put in the same group.
grp = []
for i in reals:
intdiff = False
if len(grp) == 0:
grp.append([i])
continue
for j in grp:
if int(j[0] - i) == j[0] - i:
j.append(i)
intdiff = True
break
if not intdiff:
grp.append([i])
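# e.g. the real roots [0, 1, 5/2] are grouped as [[0, 1], [5/2]]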
# True if none of the roots differ by an integer, i.e.
# each group has only one member
independent = all(len(i) == 1 for i in grp)
allpos = all(i >= 0 for i in reals)
allint = all(int(i) == i for i in reals)
# if initial conditions are provided
# then use them.
if self.is_singularics() == True:
rootstoconsider = []
for i in self.y0:
for j in indicialroots:
if j == i:
rootstoconsider.append(i)
elif allpos and allint:
rootstoconsider = [min(reals)]
elif independent:
rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
elif not allint:
rootstoconsider = []
for i in reals:
if not int(i) == i:
rootstoconsider.append(i)
elif not allpos:
if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
rootstoconsider = [min(reals)]
else:
posroots = []
for i in reals:
if i >= 0:
posroots.append(i)
rootstoconsider = [min(posroots)]
n = symbols('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
finalsol = []
char = ord('C')
for p in rootstoconsider:
dict1 = {}
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k - i) in dict1:
dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
else:
dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = max([i[1] for i in dict1])
degree2 = min([i[1] for i in dict1])
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
for j in range(lower, upper + 1):
if j in keylist:
temp = S(0)
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S(0))
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
u0 = []
if self.is_singularics() == True:
u0 = self.y0[p]
elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
y0 = _extend_y0(self, order + int(p))
# u(n) = y^n(0)/factorial(n)
if len(y0) > int(p):
for i in range(int(p), len(y0)):
u0.append(y0[i] / factorial(i))
if len(u0) < order:
for i in range(degree2, degree):
eq = S(0)
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S(0)
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif i + j[0] not in dummys:
letter = chr(char) + '_%s' %(i + j[0])
dummys[i + j[0]] = Symbol(letter)
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
continue
else:
finalsol.append((HolonomicSequence(sol, u0), p))
continue
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
else:
finalsol.append((HolonomicSequence(sol, u0), p))
char += 1
return finalsol
def series(self, n=6, coefficient=False, order=True, _recur=None):
"""
Finds the power series expansion of the given holonomic function about `x0`.
A list of series might be returned if `x0` is a regular singular point
with multiple roots of the indicial equation.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
See Also
========
HolonomicFunction.to_sequence
"""
if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = []
for i in recurrence:
sol.append(self.series(_recur=i))
return sol
n = n - int(constantpower)
l = len(recurrence.u0) - 1
k = recurrence.recurrence.order
x = self.x
x0 = self.x0
seq_dmp = recurrence.recurrence.listofpoly
R = recurrence.recurrence.parent.base
K = R.get_field()
seq = []
for i, j in enumerate(seq_dmp):
seq.append(K.new(j.rep))
sub = [-seq[i] / seq[k] for i in range(k)]
sol = [i for i in recurrence.u0]
if l + 1 < n:
# use the initial conditions to find the next term
for i in range(l + 1 - k, n - k):
coeff = S(0)
for j in range(k):
if i + j >= 0:
coeff += DMFsubs(sub[j], i) * sol[i + j]
sol.append(coeff)
if coefficient:
return sol
ser = S(0)
for i, j in enumerate(sol):
ser += x**(i + constantpower) * j
if order:
ser += Order(x**(n + int(constantpower)), x)
if x0 != 0:
return ser.subs(x, x - x0)
return ser
def _indicial(self):
"""Computes the roots of the indicial equation. At a regular singular
point these roots give the exponents of the local (Frobenius) solutions.
"""
if self.x0 != 0:
return self.shift_x(self.x0)._indicial()
list_coeff = self.annihilator.listofpoly
R = self.annihilator.parent.base
x = self.x
s = R.zero
y = R.one
def _pole_degree(poly):
root_all = roots(R.to_sympy(poly), x, filter='Z')
if 0 in root_all.keys():
return root_all[0]
else:
return 0
degree = [j.degree() for j in list_coeff]
degree = max(degree)
inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
deg = lambda q: inf if q.is_zero else _pole_degree(q)
b = deg(list_coeff[0])
for j in range(1, len(list_coeff)):
b = min(b, deg(list_coeff[j]) - j)
for i, j in enumerate(list_coeff):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
if -i - b <= 0 and degree - i - b >= 0:
s = s + listofdmp[degree - i - b] * y
y *= x - i
return roots(R.to_sympy(s), x)
def evalf(self, points, method='RK4', h=0.05, derivatives=False):
"""
Finds the numerical value of a holonomic function using numerical
integration (RK4 by default). A set of points (real or complex) must be
provided; this is the path for the numerical integration.
The path should be given as a list [x1, x2, ... xn]. The numerical
values are computed at each point in the order x1 --> x2 --> x3
... --> xn.
Returns the values of the function at x1, x2, ... xn in a list.
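The step size `h` is used when a single endpoint is given instead of a
list of points, `method` selects the integrator ('RK4' or 'Euler'), and
with derivatives=True the values of the derivatives are returned as well.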
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> # a straight line on the real axis from 0 to 1
>>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
>>> # using Runge-Kutta 4th order on e^x from 0.1 to 1.
>>> # exact solution at 1 is 2.71828182845905
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
[1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
2.45960141378007, 2.71827974413517]
>>> # using Euler's method for the same
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
[1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
2.357947691, 2.5937424601]
One can also observe that the value obtained using Runge-Kutta 4th order
is much more accurate than the one from Euler's method.
"""
from sympy.holonomic.numerical import _evalf
lp = False
# if a point `b` is given instead of a mesh
if not hasattr(points, "__iter__"):
lp = True
b = S(points)
if self.x0 == b:
return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
if not b.is_Number:
raise NotImplementedError
a = self.x0
if a > b:
h = -h
n = int((b - a) / h)
points = [a + h]
for i in range(n - 1):
points.append(points[-1] + h)
for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
if i == self.x0 or i in points:
raise SingularityError(self, i)
if lp:
return _evalf(self, points, method=method, derivatives=derivatives)[-1]
return _evalf(self, points, method=method, derivatives=derivatives)
def change_x(self, z):
"""
Changes only the variable of the holonomic function; meant for internal
use. For composition, use HolonomicFunction.composition().
"""
dom = self.annihilator.parent.base.dom
R = dom.old_poly_ring(z)
parent, _ = DifferentialOperators(R, 'Dx')
sol = []
for j in self.annihilator.listofpoly:
sol.append(R(j.rep))
sol = DifferentialOperator(sol, parent)
return HolonomicFunction(sol, z, self.x0, self.y0)
def shift_x(self, a):
"""
Substitute `x + a` for `x`.
"""
x = self.x
listaftershift = self.annihilator.listofpoly
base = self.annihilator.parent.base
sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
sol = DifferentialOperator(sol, self.annihilator.parent)
x0 = self.x0 - a
if not self._have_init_cond():
return HolonomicFunction(sol, x)
return HolonomicFunction(sol, x, x0, self.y0)
def to_hyper(self, as_list=False, _recur=None):
"""
Returns a hypergeometric function (or linear combination of them)
representing the given holonomic function.
Returns an answer of the form:
a1 * x**b1 * hyper() + a2 * x**b2 * hyper() ...
This is very useful as one can now use `hyperexpand` to find the
symbolic expressions/functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> # sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
x*hyper((), (3/2,), -x**2/4)
>>> # exp(x)
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
hyper((), (), x)
See Also
========
from_hyper, from_meijerg
"""
if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
smallest_n = recurrence[1]
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
smallest_n = recurrence[2]
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
smallest_n = recurrence[0][1]
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
smallest_n = recurrence[0][2]
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
for i in recurrence[1:]:
sol += self.to_hyper(as_list=as_list, _recur=i)
return sol
u0 = recurrence.u0
r = recurrence.recurrence
x = self.x
x0 = self.x0
# order of the recurrence relation
m = r.order
# when no recurrence exists and the power series has finitely many terms
if m == 0:
nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
sol = S(0)
for j, i in enumerate(nonzeroterms):
if i < 0 or int(i) != i:
continue
i = int(i)
if i < len(u0):
if isinstance(u0[i], (PolyElement, FracElement)):
u0[i] = u0[i].as_expr()
sol += u0[i] * x**i
else:
sol += Symbol('C_%s' %j) * x**i
if isinstance(sol, (PolyElement, FracElement)):
sol = sol.as_expr() * x**constantpower
else:
sol = sol * x**constantpower
if as_list:
if x0 != 0:
return [(sol.subs(x, x - x0), )]
return [(sol, )]
if x0 != 0:
return sol.subs(x, x - x0)
return sol
if smallest_n + m > len(u0):
raise NotImplementedError("Can't compute sufficient initial conditions")
# check if the recurrence represents a hypergeometric series
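# (only the first and last coefficients may be nonzero: then the term ratio
# u(n + m)/u(n) = -a(n)/b(n) is a rational function of n, as required)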
is_hyper = True
for i in range(1, len(r.listofpoly)-1):
if r.listofpoly[i] != r.parent.base.zero:
is_hyper = False
break
if not is_hyper:
raise NotHyperSeriesError(self, self.x0)
a = r.listofpoly[0]
b = r.listofpoly[-1]
# the constant multiple of the argument of the hypergeometric function
if isinstance(a.rep[0], (PolyElement, FracElement)):
c = - (S(a.rep[0].as_expr()) * m**(a.degree())) / (S(b.rep[0].as_expr()) * m**(b.degree()))
else:
c = - (S(a.rep[0]) * m**(a.degree())) / (S(b.rep[0]) * m**(b.degree()))
sol = 0
arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
# iterate through the initial conditions to find
# the hypergeometric representation of the given
# function.
# The answer will be a linear combination
# of different hypergeometric series which satisfy
# the recurrence.
if as_list:
listofsol = []
for i in range(smallest_n + m):
# if the recurrence relation doesn't hold for `n = i`,
# then a Hypergeometric representation doesn't exist.
# add the algebraic term a * x**i to the solution,
# where a is u0[i]
if i < smallest_n:
if as_list:
listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
else:
sol += S(u0[i]) * x**i
continue
# if the coefficient u0[i] is zero, then the
# independent hypergeometric series starting with
# x**i is not a part of the answer.
if S(u0[i]) == 0:
continue
ap = []
bq = []
# substitute m * n + i for n
for k in arg1:
ap.extend([nsimplify((i - k) / m)] * arg1[k])
for k in arg2:
bq.extend([nsimplify((i - k) / m)] * arg2[k])
# convention of (k + 1) in the denominator
if 1 in bq:
bq.remove(1)
else:
ap.append(1)
if as_list:
listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
else:
sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
if as_list:
return listofsol
sol = sol * x**constantpower
if x0 != 0:
return sol.subs(x, x - x0)
return sol
def to_expr(self):
"""
Converts a Holonomic Function back to elementary functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
besselj(1, x)
>>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
x*log(x + 1) + log(x + 1) + 1
"""
return hyperexpand(self.to_hyper()).simplify()
def change_ics(self, b, lenics=None):
"""
Changes the point `x0` to `b` for initial conditions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import symbols, sin, cos, exp
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x)).change_ics(1)
HolonomicFunction((1) + (1)Dx**2, x), f(1) = sin(1), f'(1) = cos(1)
>>> expr_to_holonomic(exp(x)).change_ics(2)
HolonomicFunction((-1) + (1)Dx, x), f(2) = exp(2)
"""
symbolic = True
if lenics is None and len(self.y0) > self.annihilator.order:
lenics = len(self.y0)
dom = self.annihilator.parent.base.domain
try:
sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
except (NotPowerSeriesError, NotHyperSeriesError):
symbolic = False
if symbolic and sol.x0 == b:
return sol
y0 = self.evalf(b, derivatives=True)
return HolonomicFunction(self.annihilator, self.x, b, y0)
def to_meijerg(self):
"""
Returns a linear combination of Meijer G-functions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import sin, cos, hyperexpand, log, symbols
>>> x = symbols('x')
>>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
sin(x) + cos(x)
>>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
log(x)
See Also
========
to_hyper()
"""
# convert to hypergeometric first
rep = self.to_hyper(as_list=True)
sol = S(0)
for i in rep:
if len(i) == 1:
sol += i[0]
elif len(i) == 2:
sol += i[0] * _hyper_to_meijerg(i[1])
return sol
def from_hyper(func, x0=0, evalf=False):
"""
Converts a hypergeometric function to holonomic.
func is the hypergeometric function and x0 is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_hyper, DifferentialOperators
>>> from sympy import symbols, hyper, S
>>> x = symbols('x')
>>> from_hyper(hyper([], [S(3)/2], x**2/4))
HolonomicFunction((-x) + (2)Dx + (x)Dx**2, x), f(1) = sinh(1), f'(1) = -sinh(1) + cosh(1)
"""
a = func.ap
b = func.bq
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
# generalized hypergeometric differential equation
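# with theta = x*Dx the pFq equation reads theta*prod(theta + b_j - 1)y = x*prod(theta + a_i)y;
# below, r2 = Dx*prod(x*Dx + b_j - 1) equals (1/x)*theta*prod(theta + b_j - 1)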
r1 = 1
for i in range(len(a)):
r1 = r1 * (x * Dx + a[i])
r2 = Dx
for i in range(len(b)):
r2 = r2 * (x * Dx + b[i] - 1)
sol = r1 - r2
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
# return None if it is Infinite or NaN
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# if the function is known symbolically
if not isinstance(simp, hyper):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
# if values don't exist at 0, then try to find initial
# conditions at 1. If it doesn't exist at 1 too then
# try 2 and so on.
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, hyper):
x0 = 1
# use evalf if the function can't be simplified
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
"""
Converts a Meijer G-function to holonomic.
func is the Meijer G-function and x0 is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_meijerg, DifferentialOperators
>>> from sympy import symbols, meijerg, S
>>> x = symbols('x')
>>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
HolonomicFunction((1) + (1)Dx**2, x), f(0) = 0, f'(0) = 1/sqrt(pi)
"""
a = func.ap
b = func.bq
n = len(func.an)
m = len(func.bm)
p = len(a)
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
# compute the differential equation satisfied by the
# Meijer G-function.
mnp = (-1)**(m + n - p)
r1 = x * mnp
for i in range(len(a)):
r1 *= x * Dx + 1 - a[i]
r2 = 1
for i in range(len(b)):
r2 *= x * Dx - b[i]
sol = r1 - r2
if not initcond:
return HolonomicFunction(sol, x).composition(z)
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# computing initial conditions
if not isinstance(simp, meijerg):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, meijerg):
x0 = 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
x_1 = Dummy('x_1')
_lookup_table = None
domain_for_table = None
from sympy.integrals.meijerint import _mytype
def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
"""
Converts a function or an expression to a holonomic function. Polynomials,
rationals, algebraic functions and known special functions are handled
directly; otherwise `meijerint._rewrite1` is used to obtain a `meijerg`
representation first, so this route only works when `meijerint._rewrite1`
returns a `meijerg` representation of the function provided.
Examples
========
>>> from sympy.holonomic.holonomic import expr_to_holonomic
>>> from sympy import sin, exp, symbols
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x))
HolonomicFunction((1) + (1)Dx**2, x), f(0) = 0, f'(0) = 1
>>> expr_to_holonomic(exp(x))
HolonomicFunction((-1) + (1)Dx, x), f(0) = 1
See Also
========
meijerint._rewrite1, _convert_poly_rat_alg, _create_table
"""
func = sympify(func)
syms = func.free_symbols
if not x:
if len(syms) == 1:
x = syms.pop()
else:
raise ValueError("Specify the variable for the function")
elif x in syms:
syms.remove(x)
extra_syms = list(syms)
if domain is None:
if func.has(Float):
domain = RR
else:
domain = QQ
if len(extra_syms) != 0:
domain = domain[extra_syms].get_field()
# try to convert if the function is polynomial or rational
solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
if solpoly:
return solpoly
# create the lookup table
global _lookup_table, domain_for_table
if not _lookup_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
elif domain != domain_for_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
# use the table directly to convert to Holonomic
if func.is_Function:
f = func.subs(x, x_1)
t = _mytype(f, x_1)
if t in _lookup_table:
l = _lookup_table[t]
sol = l[0][1].change_x(x)
else:
sol = _convert_meijerint(func, x, initcond=False, domain=domain)
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
if y0 or not initcond:
sol = sol.composition(func.args[0])
if y0:
sol.y0 = y0
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return sol.composition(func.args[0], x0, _y0)
# iterate though the expression recursively
args = func.args
f = func.func
from sympy.core import Add, Mul, Pow
sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
if f is Add:
for i in range(1, len(args)):
sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Mul:
for i in range(1, len(args)):
sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Pow:
sol = sol**args[1]
sol.x0 = x0
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
return sol
if sol.y0:
return sol
if not lenics:
lenics = sol.annihilator.order
if sol.annihilator.is_singular(x0):
r = sol._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S(1):
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol.annihilator, x, x0, y0)
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
## Some helper functions ##
def _normalize(list_of, parent, negative=True):
"""
Normalize a given annihilator
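For example, for the coefficients [1/x, 2] over QQ(x) the lcm of the
denominators is x; with negative=True each coefficient is scaled by -x,
giving [-1, -2*x], i.e. the operator (-1) + (-2*x)Dx (a sketch; the gcd
step is trivial in this case).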
"""
num = []
denom = []
base = parent.base
K = base.get_field()
lcm_denom = base.from_sympy(S(1))
list_of_coeff = []
# convert polynomials to the elements of associated
# fraction field
for i, j in enumerate(list_of):
if isinstance(j, base.dtype):
list_of_coeff.append(K.new(j.rep))
elif not isinstance(j, K.dtype):
list_of_coeff.append(K.from_sympy(sympify(j)))
else:
list_of_coeff.append(j)
# corresponding numerators of the sequence of polynomials
num.append(list_of_coeff[i].numer())
# corresponding denominators
denom.append(list_of_coeff[i].denom())
# lcm of denominators in the coefficients
for i in denom:
lcm_denom = i.lcm(lcm_denom)
if negative:
lcm_denom = -lcm_denom
lcm_denom = K.new(lcm_denom.rep)
# multiply the coefficients with lcm
for i, j in enumerate(list_of_coeff):
list_of_coeff[i] = j * lcm_denom
gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).rep)
# gcd of numerators in the coefficients
for i in num:
gcd_numer = i.gcd(gcd_numer)
gcd_numer = K.new(gcd_numer.rep)
# divide all the coefficients by the gcd
for i, j in enumerate(list_of_coeff):
frac_ans = j / gcd_numer
list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).rep)
return DifferentialOperator(list_of_coeff, parent)
def _derivate_diff_eq(listofpoly):
"""
Given a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0,
where a0, a1, ... are polynomials or rational functions, returns
b0, b1, b2, ... such that b0(x)y(x) + b1(x)y'(x) + ... = 0 is the
equation obtained by differentiating the former one.
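For example, differentiating a0*y + a1*y' = 0 once gives
a0'*y + (a0 + a1')*y' + a1*y'' = 0, i.e. the output is
[a0', a0 + a1', a1] in the order-1 case.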
"""
sol = []
a = len(listofpoly) - 1
sol.append(DMFdiff(listofpoly[0]))
for i, j in enumerate(listofpoly[1:]):
sol.append(DMFdiff(j) + listofpoly[i])
sol.append(listofpoly[a])
return sol
def _hyper_to_meijerg(func):
"""
Converts a `hyper` function to a `meijerg` function.
"""
ap = func.ap
bq = func.bq
p = len(ap)
q = len(bq)
ispoly = any(i <= 0 and int(i) == i for i in ap)
if ispoly:
return hyperexpand(func)
z = func.args[2]
# parameters of the `meijerg` function.
an = (1 - i for i in ap)
anp = ()
bm = (S(0), )
bmq = (1 - i for i in bq)
k = S(1)
for i in bq:
k = k * gamma(i)
for i in ap:
k = k / gamma(i)
return k * meijerg(an, anp, bm, bmq, -z)
def _add_lists(list1, list2):
"""Takes the polynomial sequences of two annihilators a and b and returns
the list of polynomials for the sum of a and b.
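For example, _add_lists([p0, p1], [q0, q1, q2]) gives [p0 + q0, p1 + q1, q2].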
"""
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
def _extend_y0(Holonomic, n):
"""
Tries to find more initial conditions by substituting the initial
value point in the differential equation.
"""
if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
return Holonomic.y0
annihilator = Holonomic.annihilator
a = annihilator.order
x = Holonomic.x
listofpoly = []
y0 = Holonomic.y0
R = annihilator.parent.base
K = R.get_field()
for i, j in enumerate(annihilator.listofpoly):
if isinstance(j, annihilator.parent.base.dtype):
listofpoly.append(K.new(j.rep))
if len(y0) < a or n <= len(y0):
return y0
else:
list_red = [-listofpoly[i] / listofpoly[a]
for i in range(a)]
if len(y0) > a:
y1 = [y0[i] for i in range(a)]
else:
y1 = [i for i in y0]
for i in range(n - a):
sol = 0
for a, b in zip(y1, list_red):
r = DMFsubs(b, Holonomic.x0)
try:
if not r.is_finite:
return y0
except AttributeError:
pass
if isinstance(r, (PolyElement, FracElement)):
r = r.as_expr()
sol += a * r
y1.append(sol)
list_red = _derivate_diff_eq(list_red)
return y0 + y1[len(y0):]
def DMFdiff(frac):
# differentiate a DMF object represented as p/q
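# quotient rule: (p/q)' = (p'*q - p*q')/q**2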
if not isinstance(frac, DMF):
return frac.diff()
K = frac.ring
p = K.numer(frac)
q = K.denom(frac)
sol_num = - p * q.diff() + q * p.diff()
sol_denom = q**2
return K((sol_num.rep, sol_denom.rep))
def DMFsubs(frac, x0, mpm=False):
# substitute the point x0 in a DMF object of the form p/q
if not isinstance(frac, DMF):
return frac
p = frac.num
q = frac.den
sol_p = S(0)
sol_q = S(0)
if mpm:
from mpmath import mp
for i, j in enumerate(reversed(p)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_p += j * x0**i
for i, j in enumerate(reversed(q)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_q += j * x0**i
if isinstance(sol_p, (PolyElement, FracElement)):
sol_p = sol_p.as_expr()
if isinstance(sol_q, (PolyElement, FracElement)):
sol_q = sol_q.as_expr()
return sol_p / sol_q
def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
"""Converts polynomials, rationals and algebraic functions of the form p(x)**(m/n) to holonomic.
"""
ispoly = func.is_polynomial()
if not ispoly:
israt = func.is_rational_function()
else:
israt = True
if not (ispoly or israt):
basepoly, ratexp = func.as_base_exp()
if basepoly.is_polynomial() and ratexp.is_Number:
if isinstance(ratexp, Float):
ratexp = nsimplify(ratexp)
m, n = ratexp.p, ratexp.q
is_alg = True
else:
is_alg = False
else:
is_alg = True
if not (ispoly or israt or is_alg):
return None
R = domain.old_poly_ring(x)
_, Dx = DifferentialOperators(R, 'Dx')
# if the function is constant
if not func.has(x):
return HolonomicFunction(Dx, x, 0, [func])
if ispoly:
# differential equation satisfied by polynomial
sol = func * Dx - func.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(func).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = list(reversed(rep))[i:]
indicial = i
break
y0 = {indicial:coeff}
elif israt:
order = 1
p, q = func.as_numer_denom()
# differential equation satisfied by rational
sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
elif is_alg:
sol = n * (x / m) * Dx - 1
sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(basepoly).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = S(j)**ratexp
indicial = S(i) * ratexp
break
y0 = {indicial: [coeff]}
if y0 or not initcond:
return HolonomicFunction(sol, x, x0, y0)
if not lenics:
lenics = sol.order
if sol.is_singular(x0):
r = HolonomicFunction(sol, x, x0)._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S(1):
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol, x, x0, y0)
y0 = _find_conditions(func, x, x0, lenics)
while not y0:
x0 += 1
y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol, x, x0, y0)
def _convert_meijerint(func, x, initcond=True, domain=QQ):
args = meijerint._rewrite1(func, x)
if args:
fac, po, g, _ = args
else:
return None
# lists for sum of meijerg functions
fac_list = [fac * i[0] for i in g]
t = po.as_base_exp()
s = t[1] if t[0] is x else S(0)
po_list = [s + i[1] for i in g]
G_list = [i[2] for i in g]
# finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
def _shift(func, s):
z = func.args[-1]
if z.has(I):
z = z.subs(exp_polar, exp)
d = z.collect(x, evaluate=False)
b = list(d)[0]
a = d[b]
t = b.as_base_exp()
b = t[1] if t[0] is x else S(0)
r = s / b
an = (i + r for i in func.args[0][0])
ap = (i + r for i in func.args[0][1])
bm = (i + r for i in func.args[1][0])
bq = (i + r for i in func.args[1][1])
return a**-r, meijerg((an, ap), (bm, bq), z)
coeff, m = _shift(G_list[0], po_list[0])
sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
# add all the meijerg functions after converting to holonomic
for i in range(1, len(G_list)):
coeff, m = _shift(G_list[i], po_list[i])
sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
return sol
def _create_table(table, domain=QQ):
"""
Creates the look-up table. For a similar implementation
see meijerint._create_lookup_table.
"""
def add(formula, annihilator, arg, x0=0, y0=[]):
"""
Adds a formula in the dictionary
"""
table.setdefault(_mytype(formula, x_1), []).append((formula,
HolonomicFunction(annihilator, arg, x0, y0)))
R = domain.old_poly_ring(x_1)
_, Dx = DifferentialOperators(R, 'Dx')
from sympy import (sin, cos, exp, log, erf, sqrt, pi,
sinh, cosh, sinc, erfc, Si, Ci, Shi, erfi)
# add some basic functions
add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
add(exp(x_1), Dx - 1, x_1, 0, 1)
add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
def _find_conditions(func, x, x0, order):
y0 = []
for i in range(order):
val = func.subs(x, x0)
if isinstance(val, NaN):
val = limit(func, x, x0)
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
func = func.diff(x)
return y0
| bsd-3-clause |
FEPanalysis/alchemical-analysis-OLD | alchemical_analysis/alchemical_analysis.py | 1 | 58462 | #!/usr/bin/env python
######################################################################
# Alchemical Analysis: An open tool implementing some recommended practices for analyzing alchemical free energy calculations
# Copyright 2011-2015 UC Irvine and the Authors
#
# Authors: Pavel Klimovich, Michael Shirts and David Mobley
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, see <http://www.gnu.org/licenses/>.
######################################################################
#===================================================================================================
# IMPORTS
#===================================================================================================
## Not a built-in module. Will be called from main, whenever needed. ##
## import pymbar Multistate Bennett Acceptance Ratio estimator. ##
import sys
import numpy
import pickle # for full-precision data storage
from optparse import OptionParser # for parsing command-line options
import os # for os interface
import time as ttt_time # for timing
import pdb # for debugging
#===================================================================================================
# INPUT OPTIONS
#===================================================================================================
parser = OptionParser()
parser.add_option('-a', '--software', dest = 'software', help = 'Name of the package the data files come from: Gromacs, Sire, Desmond, or AMBER. Default: Gromacs.', default = 'Gromacs')
parser.add_option('-c', '--cfm', dest = 'bCFM', help = 'The Curve-Fitting-Method-based consistency inspector. Default: False.', default = False, action = 'store_true')
parser.add_option('-d', '--dir', dest = 'datafile_directory', help = 'Directory in which data files are stored. Default: Current directory.', default = '.')
parser.add_option('-f', '--forwrev', dest = 'bForwrev', help = 'Plotting the free energy change as a function of time in both directions. The number of time points (an integer) is to follow the flag. Default: 0', default = 0, type=int)
parser.add_option('-g', '--breakdown', dest = 'breakdown', help = 'Plotting the free energy differences evaluated for each pair of adjacent states for all methods. Default: False.', default = False, action = 'store_true')
parser.add_option('-i', '--threshold', dest = 'uncorr_threshold', help = 'Proceed with correlated samples if the number of uncorrelated samples is found to be less than this number. If 0 is given, the time series analysis will not be performed at all. Default: 50.', default = 50, type=int)
parser.add_option('-k', '--koff', dest = 'bSkipLambdaIndex', help = 'Give a string of lambda indices separated by \'-\' and they will be removed from the analysis. (Another approach is to have only the files of interest present in the directory). Default: None.', default = '')
parser.add_option('-m', '--methods', dest = 'methods', help = 'A list of the methods to estimate the free energy with. Default: [TI, TI-CUBIC, DEXP, IEXP, BAR, MBAR]. To add/remove methods to the above list provide a string formed of the method strings preceded with +/-. For example, \'-ti_cubic+gdel\' will turn methods into [TI, DEXP, IEXP, BAR, MBAR, GDEL]. \'ti_cubic+gdel\', on the other hand, will call [TI-CUBIC, GDEL]. \'all\' calls the full list of supported methods [TI, TI-CUBIC, DEXP, IEXP, GINS, GDEL, BAR, UBAR, RBAR, MBAR].', default = '')
parser.add_option('-n', '--uncorr', dest = 'uncorr', help = 'The observable to be used for the autocorrelation analysis; either \'dhdl\' (default) or \'dE\'. In the latter case the energy differences dE_{i,i+1} (dE_{i,i-1} for the last lambda) are used.', default = 'dhdl')
parser.add_option('-o', '--out', dest = 'output_directory', help = 'Directory in which the output files produced by this script will be stored. Default: Same as datafile_directory.', default = '')
parser.add_option('-p', '--prefix', dest = 'prefix', help = 'Prefix for datafile sets, i.e.\'dhdl\' (default).', default = 'dhdl')
parser.add_option('-q', '--suffix', dest = 'suffix', help = 'Suffix for datafile sets, i.e. \'xvg\' (default).', default = 'xvg')
parser.add_option('-r', '--decimal', dest = 'decimal', help = 'The number of decimal places the free energies are to be reported with. No worries, this is for the text output only; the full-precision data will be stored in \'results.pickle\'. Default: 3.', default = 3, type=int)
parser.add_option('-s', '--skiptime', dest = 'equiltime', help = 'Discard data prior to this specified time as \'equilibration\' data. Units picoseconds. Default: 0 ps.', default = 0, type=float)
parser.add_option('-t', '--temperature', dest = 'temperature', help = "Temperature in K. Default: 298 K.", default = 298, type=float)
parser.add_option('-u', '--units', dest = 'units', help = 'Units to report energies: \'kJ\', \'kcal\', and \'kBT\'. Default: \'kJ\'', default = 'kJ')
parser.add_option('-v', '--verbose', dest = 'verbose', help = 'Verbose option. Default: False.', default = False, action = 'store_true')
parser.add_option('-w', '--overlap', dest = 'overlap', help = 'Print out and plot the overlap matrix. Default: False.', default = False, action = 'store_true')
parser.add_option('-x', '--ignoreWL', dest = 'bIgnoreWL', help = 'Do not check whether the WL weights are equilibrated. No log file needed as an accompanying input.', default = False, action = 'store_true')
parser.add_option('-y', '--tolerance', dest = 'relative_tolerance', help = "Convergence criterion for the energy estimates with BAR and MBAR. Default: 1e-10.", default = 1e-10, type=float)
parser.add_option('-z', '--initialize', dest = 'init_with', help = 'The initial MBAR free energy guess; either \'BAR\' or \'zeroes\'. Default: \'BAR\'.', default = 'BAR')
#===================================================================================================
# FUNCTIONS: Miscellanea.
#===================================================================================================
def getMethods(string):
"""Returns a list of the methods the free energy is to be estimated with."""
all_methods = ['TI','TI-CUBIC','DEXP','IEXP','GINS','GDEL','BAR','UBAR','RBAR','MBAR']
methods = ['TI','TI-CUBIC','DEXP','IEXP','BAR','MBAR']
if (numpy.array(['Sire']) == P.software.title()).any():
methods = ['TI','TI-CUBIC']
if not string:
return methods
def addRemove(string):
operation = string[0]
string = string[1:]+'+'
method = ''
for c in string:
if c.isalnum():
method += c
elif c=='_':
method += '-'
elif (c=='-' or c=='+'):
if method in all_methods:
if operation=='-':
if method in methods:
methods.remove(method)
else:
if not method in methods:
methods.append(method)
method = ''
operation = c
else:
parser.error("\nThere is no '%s' in the list of supported methods." % method)
else:
parser.error("\nUnknown character '%s' found in the method string." % c)
return
if string=='ALL':
methods = all_methods
else:
primo = string[0]
if primo.isalpha():
methods = string.replace('+', ' ').replace('_', '-').split()
methods = [m for m in methods if m in all_methods]
elif primo=='+' or primo=='-':
addRemove(string)
else:
parser.error("\nUnknown character '%s' found in the method string." % primo)
return methods
def checkUnitsAndMore(units):
kB = 1.3806488*6.02214129/1000.0 # Boltzmann's constant (kJ/mol/K).
beta = 1./(kB*P.temperature)
b_kcal = (numpy.array(['Sire', 'Amber']) == P.software.title()).any()
if units == 'kJ':
beta_report = beta/4.184**b_kcal
units = '(kJ/mol)'
elif units == 'kcal':
beta_report = 4.184**(not b_kcal)*beta
units = '(kcal/mol)'
elif units == 'kBT':
beta_report = 1
units = '(k_BT)'
else:
parser.error('\nI don\'t understand the unit type \'%s\': the only options are \'kJ\', \'kcal\', and \'kBT\'' % units)
if not P.output_directory:
P.output_directory = P.datafile_directory
if P.overlap:
if not 'MBAR' in P.methods:
parser.error("\nMBAR is not in 'methods'; can't plot the overlap matrix.")
return units, beta, beta_report
def timeStatistics(stime):
etime = ttt_time.time()
tm = int((etime-stime)/60.)
th = int(tm/60.)
ts = '%.2f' % (etime-stime-60*(tm+60*th))
return th, tm, ts, ttt_time.asctime()
#===================================================================================================
# FUNCTIONS: The autocorrelation analysis.
#===================================================================================================
def uncorrelate(sta, fin, do_dhdl=False):
"""Identifies uncorrelated samples and updates the arrays of the reduced potential energy and dhdlt, retaining data entries of these samples only.
'sta' and 'fin' are the starting and final snapshot positions to be read; both are arrays of dimension K."""
if not P.uncorr_threshold:
if P.software.title()=='Sire':
return dhdlt, nsnapshots, None
return dhdlt, nsnapshots, u_klt
import pymbar ## this is not a built-in module ##
u_kln = numpy.zeros([K,K,max(fin-sta)], numpy.float64) # u_kln[k,m,n] is the reduced potential energy of uncorrelated sample index n from state k evaluated at state m
N_k = numpy.zeros(K, int) # N_k[k] is the number of uncorrelated samples from state k
g = numpy.zeros(K,float) # autocorrelation times for the data
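# (pymbar's statistical inefficiency g = 1 + 2*tau; roughly every g-th sample is independent)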
if do_dhdl:
dhdl = numpy.zeros([K,n_components,max(fin-sta)], float) #dhdl is value for dhdl for each component in the file at each time.
print "\n\nNumber of correlated and uncorrelated samples:\n\n%6s %12s %12s %12s\n" % ('State', 'N', 'N_k', 'N/N_k')
UNCORR_OBSERVABLE = {'Gromacs':P.uncorr, 'Amber':'dhdl', 'Sire':'dhdl', 'Desmond':'dE'}[P.software.title()]
if UNCORR_OBSERVABLE == 'dhdl':
#Uncorrelate based on dhdl values at a given lambda
for k in range(K):
# Sum up over the energy components; notice, that only the relevant data is being used in the third dimension.
dhdl_sum = numpy.sum(dhdlt[k,:,sta[k]:fin[k]], axis=0)
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
# (alternatively, could use the energy differences -- here, we will use total dhdl).
g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
if do_dhdl:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k)
indices = sta[k] + numpy.arange(len(dhdl_sum))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
print "%6s %12s %12s %12.2f" % (k, fin[k], N_k[k], g[k])
for n in range(n_components):
dhdl[k,n,0:N] = dhdlt[k,n,indices]
if UNCORR_OBSERVABLE == 'dE':
#Decorrelate based on energy differences between lambdas
for k in range(K):
# Sum up over the energy components as above using only the relevant data; here we use energy differences
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
dE = u_klt[k,k+1,sta[k]:fin[k]] if not k==K-1 else u_klt[k,k-1,sta[k]:fin[k]]
g[k] = pymbar.timeseries.statisticalInefficiency(dE)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dE, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k)
indices = sta[k] + numpy.arange(len(dE))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
return (dhdl, N_k, u_kln)
return (N_k, u_kln)
#===================================================================================================
# FUNCTIONS: The MBAR workhorse.
#===================================================================================================
def estimatewithMBAR(u_kln, N_k, reltol, regular_estimate=False):
"""Computes the MBAR free energy given the reduced potential and the number of relevant entries in it."""
def plotOverlapMatrix(O):
"""Plots the probability of observing a sample from state i (row) in state j (column).
For convenience, the neighboring state cells are fringed in bold."""
max_prob = O.max()
fig = pl.figure(figsize=(K/2.,K/2.))
fig.add_subplot(111, frameon=False, xticks=[], yticks=[])
for i in range(K):
if i!=0:
pl.axvline(x=i, ls='-', lw=0.5, color='k', alpha=0.25)
pl.axhline(y=i, ls='-', lw=0.5, color='k', alpha=0.25)
for j in range(K):
if O[j,i] < 0.005:
ii = ''
else:
ii = ("%.2f" % O[j,i])[1:]
alf = O[j,i]/max_prob
pl.fill_between([i,i+1], [K-j,K-j], [K-(j+1),K-(j+1)], color='k', alpha=alf)
pl.annotate(ii, xy=(i,j), xytext=(i+0.5,K-(j+0.5)), size=8, textcoords='data', va='center', ha='center', color=('k' if alf < 0.5 else 'w'))
if P.bSkipLambdaIndex:
ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]
ks = numpy.delete(numpy.arange(K+len(ks)), ks)
else:
ks = range(K)
for i in range(K):
pl.annotate(ks[i], xy=(i+0.5, 1), xytext=(i+0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
pl.annotate(ks[i], xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K-(i+0.5)), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
pl.annotate('$\lambda$', xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
pl.plot([0,K], [0,0], 'k-', lw=4.0, solid_capstyle='butt')
pl.plot([K,K], [0,K], 'k-', lw=4.0, solid_capstyle='butt')
pl.plot([0,0], [0,K], 'k-', lw=2.0, solid_capstyle='butt')
pl.plot([0,K], [K,K], 'k-', lw=2.0, solid_capstyle='butt')
cx = sorted(2*range(K+1))
cy = sorted(2*range(K+1), reverse=True)
pl.plot(cx[2:-1], cy[1:-2], 'k-', lw=2.0)
pl.plot(numpy.array(cx[2:-3])+1, cy[1:-4], 'k-', lw=2.0)
pl.plot(cx[1:-2], numpy.array(cy[:-3])-1, 'k-', lw=2.0)
pl.plot(cx[1:-4], numpy.array(cy[:-5])-2, 'k-', lw=2.0)
pl.xlim(-1, K)
pl.ylim(0, K+1)
pl.savefig(os.path.join(P.output_directory, 'O_MBAR.pdf'), bbox_inches='tight', pad_inches=0.0)
pl.close(fig)
return
if regular_estimate:
print "\nEstimating the free energy change with MBAR..."
MBAR = pymbar.mbar.MBAR(u_kln, N_k, verbose = P.verbose, relative_tolerance = reltol, initialize = P.init_with)
# Get matrix of dimensionless free energy differences and uncertainty estimate.
(Deltaf_ij, dDeltaf_ij, theta_ij ) = MBAR.getFreeEnergyDifferences(uncertainty_method='svd-ew', return_theta = True)
if P.verbose:
print "Matrix of free energy differences\nDeltaf_ij:\n%s\ndDeltaf_ij:\n%s" % (Deltaf_ij, dDeltaf_ij)
if regular_estimate:
if P.overlap:
print "The overlap matrix is..."
O = MBAR.computeOverlap()[2]
for k in range(K):
line = ''
for l in range(K):
line += ' %5.2f ' % O[k, l]
print line
plotOverlapMatrix(O)
print "\nFor a nicer figure look at 'O_MBAR.pdf'"
return (Deltaf_ij, dDeltaf_ij)
return (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
#===================================================================================================
# FUNCTIONS: Thermodynamic integration.
#===================================================================================================
class naturalcubicspline:
def __init__(self, x):
# define some space
L = len(x)
H = numpy.zeros([L,L],float)
M = numpy.zeros([L,L],float)
BW = numpy.zeros([L,L],float)
AW = numpy.zeros([L,L],float)
DW = numpy.zeros([L,L],float)
h = x[1:L]-x[0:L-1]
ih = 1.0/h
# define the H and M matrix, from p. 371 "applied numerical methods with matlab, Chapra"
H[0,0] = 1
H[L-1,L-1] = 1
for i in range(1,L-1):
H[i,i] = 2*(h[i-1]+h[i])
H[i,i-1] = h[i-1]
H[i,i+1] = h[i]
M[i,i] = -3*(ih[i-1]+ih[i])
M[i,i-1] = 3*(ih[i-1])
M[i,i+1] = 3*(ih[i])
CW = numpy.dot(numpy.linalg.inv(H),M) # this is the matrix translating c to weights in f.
# each row corresponds to the weights for each c.
# from CW, define the other coefficient matrices
for i in range(0,L-1):
BW[i,:] = -(h[i]/3)*(2*CW[i,:]+CW[i+1,:])
BW[i,i] += -ih[i]
BW[i,i+1] += ih[i]
DW[i,:] = (ih[i]/3)*(CW[i+1,:]-CW[i,:])
AW[i,i] = 1
# Make copies of the arrays we'll be using in the future.
self.x = x.copy()
self.AW = AW.copy()
self.BW = BW.copy()
self.CW = CW.copy()
self.DW = DW.copy()
# find the integrating weights
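# integrating the cubic a + b*t + c*t**2 + d*t**3 over an interval of width h
# gives a*h + b*h**2/2 + c*h**3/3 + d*h**4/4, which is what the weights below encode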
self.wsum = numpy.zeros([L],float)
self.wk = numpy.zeros([L-1,L],float)
for k in range(0,L-1):
w = DW[k,:]*(h[k]**4)/4.0 + CW[k,:]*(h[k]**3)/3.0 + BW[k,:]*(h[k]**2)/2.0 + AW[k,:]*(h[k])
self.wk[k,:] = w
self.wsum += w
def interpolate(self,y,xnew):
if len(self.x) != len(y):
parser.error("\nThe length of 'y' should be consistent with that of 'self.x'. I cannot perform linear algebra operations.")
# get the array of actual coefficients by multiplying the coefficient matrix by the values
a = numpy.dot(self.AW,y)
b = numpy.dot(self.BW,y)
c = numpy.dot(self.CW,y)
d = numpy.dot(self.DW,y)
N = len(xnew)
ynew = numpy.zeros([N],float)
for i in range(N):
# Find the index of 'xnew[i]' it would have in 'self.x'.
j = numpy.searchsorted(self.x, xnew[i]) - 1
lamw = xnew[i] - self.x[j]
ynew[i] = d[j]*lamw**3 + c[j]*lamw**2 + b[j]*lamw + a[j]
# Preserve the terminal points.
ynew[0] = y[0]
ynew[-1] = y[-1]
return ynew
def TIprelim(lv):
# Lambda vectors spacing.
dlam = numpy.diff(lv, axis=0)
lchange = numpy.zeros([K,n_components],bool) # booleans for which lambdas are changing
for j in range(n_components):
# need to identify range over which lambda doesn't change, and not interpolate over that range.
for k in range(K-1):
if (lv[k+1,j]-lv[k,j] > 0):
lchange[k,j] = True
lchange[k+1,j] = True
if 'ave_dhdl' in globals() and 'std_dhdl' in globals():
return lchange, dlam, globals()['ave_dhdl'], globals()['std_dhdl']
# Compute <dhdl> and std(dhdl) for each component, for each lambda; multiply them by beta to make unitless.
ave_dhdl = numpy.zeros([K,n_components],float)
std_dhdl = numpy.zeros([K,n_components],float)
for k in range(K):
ave_dhdl[k,:] = P.beta*numpy.average(dhdl[k,:,0:N_k[k]],axis=1)
std_dhdl[k,:] = P.beta*numpy.std(dhdl[k,:,0:N_k[k]],axis=1)/numpy.sqrt(N_k[k]-1)
return lchange, dlam, ave_dhdl, std_dhdl
def getSplines(lchange):
# construct a map back to the original components
mapl = numpy.zeros([K,n_components],int) # map back to the original k from the components
for j in range(n_components):
incr = 0
for k in range(K):
if (lchange[k,j]):
mapl[k,j] += incr
incr +=1
# put together the spline weights for the different components
cubspl = list()
for j in range(n_components):
lv_lchange = lv[lchange[:,j],j]
if len(lv_lchange) == 0: # handle the all-zero lv column
cubspl.append(0)
else:
spl = naturalcubicspline(lv_lchange)
cubspl.append(spl)
return cubspl, mapl
#===================================================================================================
# FUNCTIONS: This one estimates dF and ddF for all pairs of adjacent states and stores them.
#===================================================================================================
def estimatePairs():
print ("Estimating the free energy change with %s..." % ', '.join(P.methods)).replace(', MBAR', '')
df_allk = list(); ddf_allk = list()
for k in range(K-1):
df = dict(); ddf = dict()
for name in P.methods:
if name == 'TI':
#===================================================================================================
# Estimate free energy difference with TI; interpolating with the trapezoidal rule.
#===================================================================================================
df['TI'] = 0.5*numpy.dot(dlam[k],(ave_dhdl[k]+ave_dhdl[k+1]))
ddf['TI'] = 0.5*numpy.sqrt(numpy.dot(dlam[k]**2,std_dhdl[k]**2+std_dhdl[k+1]**2))
if name == 'TI-CUBIC':
#===================================================================================================
# Estimate free energy difference with TI; interpolating with the natural cubic splines.
#===================================================================================================
df['TI-CUBIC'], ddf['TI-CUBIC'] = 0, 0
for j in range(n_components):
if dlam[k,j] > 0:
lj = lchange[:,j]
df['TI-CUBIC'] += numpy.dot(cubspl[j].wk[mapl[k,j]],ave_dhdl[lj,j])
ddf['TI-CUBIC'] += numpy.dot(cubspl[j].wk[mapl[k,j]]**2,std_dhdl[lj,j]**2)
ddf['TI-CUBIC'] = numpy.sqrt(ddf['TI-CUBIC'])
if any(name == m for m in ['DEXP', 'GDEL', 'BAR', 'UBAR', 'RBAR']):
w_F = u_kln[k,k+1,0:N_k[k]] - u_kln[k,k,0:N_k[k]]
if name == 'DEXP':
#===================================================================================================
# Estimate free energy difference with Forward-direction EXP (in this case, Deletion from solvent).
#===================================================================================================
(df['DEXP'], ddf['DEXP']) = pymbar.exp.EXP(w_F)
if name == 'GDEL':
#===================================================================================================
# Estimate free energy difference with a Gaussian estimate of EXP (in this case, deletion from solvent)
#===================================================================================================
(df['GDEL'], ddf['GDEL']) = pymbar.exp.EXPGauss(w_F)
if any(name == m for m in ['IEXP', 'GINS', 'BAR', 'UBAR', 'RBAR']):
w_R = u_kln[k+1,k,0:N_k[k+1]] - u_kln[k+1,k+1,0:N_k[k+1]]
if name == 'IEXP':
#===================================================================================================
# Estimate free energy difference with Reverse-direction EXP (in this case, insertion into solvent).
#===================================================================================================
(rdf,rddf) = pymbar.exp.EXP(w_R)
(df['IEXP'], ddf['IEXP']) = (-rdf,rddf)
if name == 'GINS':
#===================================================================================================
# Estimate free energy difference with a Gaussian estimate of EXP (in this case, insertion into solvent)
#===================================================================================================
(rdf,rddf) = pymbar.exp.EXPGauss(w_R)
(df['GINS'], ddf['GINS']) = (-rdf,rddf)
if name == 'BAR':
#===================================================================================================
# Estimate free energy difference with BAR; use w_F and w_R computed above.
#===================================================================================================
(df['BAR'], ddf['BAR']) = pymbar.bar.BAR(w_F, w_R, relative_tolerance=P.relative_tolerance, verbose = P.verbose)
if name == 'UBAR':
#===================================================================================================
# Estimate free energy difference with unoptimized BAR -- assume dF is zero, and just do one evaluation
#===================================================================================================
(df['UBAR'], ddf['UBAR']) = pymbar.bar.BAR(w_F, w_R, verbose = P.verbose,iterated_solution=False)
if name == 'RBAR':
#===================================================================================================
# Estimate free energy difference with unoptimized BAR over a range of free energy values, and choose the one
# that is self consistently best.
#===================================================================================================
min_diff = 1E6
best_udf = 0
for trial_udf in range(-10,10,1):
(udf, uddf) = pymbar.bar.BAR(w_F, w_R, DeltaF=trial_udf, iterated_solution=False, verbose=P.verbose)
diff = numpy.abs(udf - trial_udf)
if (diff < min_diff):
best_udf = udf
best_uddf = uddf
min_diff = diff
(df['RBAR'], ddf['RBAR']) = (best_udf,best_uddf)
if name == 'MBAR':
#===================================================================================================
# Store the MBAR free energy difference (already estimated above) properly, i.e. by state.
#===================================================================================================
(df['MBAR'], ddf['MBAR']) = Deltaf_ij[k,k+1], dDeltaf_ij[k,k+1]
df_allk = numpy.append(df_allk,df)
ddf_allk = numpy.append(ddf_allk,ddf)
return df_allk, ddf_allk
#===================================================================================================
# FUNCTIONS: All done with calculations; summarize and print stats.
#===================================================================================================
def totalEnergies():
# Count up the charging states.
startcoul = 0
endcoul = 0
startvdw = 0
endvdw = 0
for lv_n in ['vdw','coul','fep']:
if lv_n in P.lv_names:
_ndx_char = P.lv_names.index(lv_n)
lv_char = lv[:, _ndx_char]
if not (lv_char == lv_char[0]).all():
if lv_n == 'vdw':
endvdw = (lv_char != 1).sum()
if lv_n == 'fep':
endcoul = (lv_char != 1).sum()
ndx_char = P.lv_names.index(lv_n)
if lv_n == 'coul':
endcoul = (lv_char != 1).sum()
ndx_char = P.lv_names.index(lv_n)
# Figure out if coulomb section comes before or after vdw section
if endcoul > endvdw:
startcoul = endvdw
startvdw = 0
else:
startcoul = 0
startvdw = endcoul
segments = ['Coulomb' , 'vdWaals' , 'TOTAL']
segmentstarts = [startcoul , startvdw , 0 ]
segmentends = [endcoul , endvdw , K-1 ]
dFs = []
ddFs = []
# Perform the energy segmentation; be pedantic about the TI cumulative ddF's (see Section 3.1 of the paper).
for i in range(len(segments)):
segment = segments[i]; segstart = segmentstarts[i]; segend = segmentends[i]
dF = dict.fromkeys(P.methods, 0)
ddF = dict.fromkeys(P.methods, 0)
for name in P.methods:
if name == 'MBAR':
dF['MBAR'] = Deltaf_ij[segstart, segend]
ddF['MBAR'] = dDeltaf_ij[segstart, segend]
elif name[0:2] == 'TI':
for k in range(segstart, segend):
dF[name] += df_allk[k][name]
if segment == 'Coulomb':
jlist = [ndx_char] if endcoul>0 else []
elif segment == 'vdWaals':
jlist = []
elif segment == 'TOTAL':
jlist = range(n_components)
for j in jlist:
lj = lchange[:,j]
if lj.any(): # handle the all-zero lv column
if name == 'TI-CUBIC':
ddF[name] += numpy.dot((cubspl[j].wsum)**2,std_dhdl[lj,j]**2)
elif name == 'TI':
h = numpy.trim_zeros(dlam[:,j])
wsum = 0.5*(numpy.append(h,0) + numpy.append(0,h))
ddF[name] += numpy.dot(wsum**2,std_dhdl[lj,j]**2)
ddF[name] = numpy.sqrt(ddF[name])
else:
for k in range(segstart,segend):
dF[name] += df_allk[k][name]
ddF[name] += (ddf_allk[k][name])**2
ddF[name] = numpy.sqrt(ddF[name])
dFs.append(dF)
ddFs.append(ddF)
for name in P.methods: # 'vdWaals' = 'TOTAL' - 'Coulomb'
ddFs[1][name] = (ddFs[2][name]**2 - ddFs[0][name]**2)**0.5
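# The line above assumes the Coulomb and TOTAL estimates carry independent
# errors, so the variances subtract in quadrature; e.g. (hypothetical numbers)
# ddF_total = 0.5 and ddF_coul = 0.3 yield ddF_vdw = (0.25 - 0.09)**0.5 = 0.4.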
# Display results.
def printLine(str1, str2, d1=None, d2=None):
"""Fills out the results table linewise."""
print str1,
text = str1
for name in P.methods:
if d1 == 'plain':
print str2,
text += ' ' + str2
if d1 == 'name':
print str2 % (name, P.units),
text += ' ' + str2 % (name, P.units)
if d1 and d2:
print str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report),
text += ' ' + str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report)
print ''
outtext.append(text + '\n')
return
d = P.decimal
str_dash = (d+7 + 6 + d+2)*'-'
str_dat = ('X%d.%df +- X%d.%df' % (d+7, d, d+2, d)).replace('X', '%')
str_names = ('X%ds X-%ds' % (d+6, d+8)).replace('X', '%')
outtext = []
printLine(12*'-', str_dash, 'plain')
printLine('%-12s' % ' States', str_names, 'name')
printLine(12*'-', str_dash, 'plain')
for k in range(K-1):
printLine('%4d -- %-4d' % (k, k+1), str_dat, df_allk[k], ddf_allk[k])
printLine(12*'-', str_dash, 'plain')
remark = ["", "A remark on the energy components interpretation: ",
" 'vdWaals' is computed as 'TOTAL' - 'Coulomb', where ",
" 'Coulomb' is found as the free energy change between ",
" the states defined by the lambda vectors (0,0,...,0) ",
" and (1,0,...,0), the only varying vector component ",
" being either 'coul-lambda' or 'fep-lambda'. "]
w = 12 + (1+len(str_dash))*len(P.methods)
str_align = '{:I^%d}' % w
if len(P.lv_names)>1:
for i in range(len(segments)):
printLine('%9s: ' % segments[i], str_dat, dFs[i], ddFs[i])
for i in remark:
print str_align.replace('I', ' ').format(i)
else:
printLine('%9s: ' % segments[-1], str_dat, dFs[-1], ddFs[-1])
# Store results.
outfile = open(os.path.join(P.output_directory, 'results.txt'), 'w')
outfile.write('# Command line was: %s\n\n' % ' '.join(sys.argv) )
outfile.writelines(outtext)
outfile.close()
P.datafile_directory = os.getcwd()
P.when_analyzed = ttt_time.asctime()
P.ddf_allk = ddf_allk
P.df_allk = df_allk
P.ddFs = ddFs
P.dFs = dFs
outfile = open(os.path.join(P.output_directory, 'results.pickle'), 'wb') # binary mode for pickle
pickle.dump(P, outfile)
outfile.close()
print '\n'+w*'*'
for i in [" The above table has been stored in ", " "+P.output_directory+"/results.txt ",
" while the full-precision data ", " (along with the simulation profile) in ", " "+P.output_directory+"/results.pickle "]:
print str_align.format('{:^40}'.format(i))
print w*'*'
return
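# To re-read the stored results later (a sketch, from a separate session;
# the attribute names match those assigned above):
#   import pickle
#   res = pickle.load(open('results.pickle', 'rb'))
#   print res.dFs, res.ddFs   # per-segment free energies and uncertainties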
#===================================================================================================
# FUNCTIONS: Free energy change vs. simulation time. Called by the -f flag.
#===================================================================================================
def dF_t():
def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):
"""Plots the free energy change computed using the equilibrated snapshots between the proper target time frames (f_ts and r_ts)
in both forward (data points are stored in F_df and F_ddf) and reverse (data points are stored in R_df and R_ddf) directions."""
fig = pl.figure(figsize=(8,6))
ax = fig.add_subplot(111)
pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)
pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)
for dire in ['top', 'right']:
ax.spines[dire].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_fts = max(f_ts)
rr_ts = [aa/max_fts for aa in f_ts[::-1]]
f_ts = [aa/max_fts for aa in f_ts]
r_ts = [aa/max_fts for aa in r_ts]
line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0], color='#D2B9D3', zorder=-5)
for i in range(len(f_ts)):
line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]], color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1)
line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2)
for i in range(len(rr_ts)):
line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]], color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3)
line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4)
pl.xlim(r_ts[0], f_ts[-1])
pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10)
pl.yticks(fontsize=10)
leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=9, prop=FP(size=18), frameon=False)
pl.xlabel(r'$\mathrm{Fraction\/of\/the\/simulation\/time}$', fontsize=16, color='#151B54')
pl.ylabel(r'$\mathrm{\Delta G\/%s}$' % P.units, fontsize=16, color='#151B54')
pl.xticks(f_ts, ['%.2f' % i for i in f_ts])
pl.tick_params(axis='x', color='#D2B9D3')
pl.tick_params(axis='y', color='#D2B9D3')
pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf'))
pl.close(fig)
return
if 'MBAR' not in P.methods:
parser.error("\nThe dF(t) analysis currently works with MBAR only, and 'MBAR' is not in the list of methods.")
if not (P.snap_size[0] == numpy.array(P.snap_size)).all(): # this could be circumvented
parser.error("\nThe snapshot size isn't the same for all the files; cannot perform the dF(t) analysis.")
# Define a list of bForwrev equidistant time frames at which the free energy is to be estimated; count up the snapshots bounded between the time frames.
n_tf = P.bForwrev + 1
nss_tf = numpy.zeros([n_tf, K], int)
increment = 1./(n_tf-1)
if P.bExpanded:
from collections import Counter # for counting elements in an array
tf = (numpy.arange(0,1+increment,increment)*(numpy.sum(nsnapshots)-1)+1).astype(int) # integer indices for slicing
tf[0] = 0
for i in range(n_tf-1):
nss = Counter(extract_states[tf[i]:tf[i+1]])
nss_tf[i+1] = numpy.array([nss[j] for j in range(K)])
else:
tf = (numpy.arange(0,1+increment,increment)*(max(nsnapshots)-1)+1).astype(int) # integer indices for slicing
tf[0] = 0
for i in range(n_tf-1):
nss_tf[i+1] = numpy.array([min(j, tf[i+1]) for j in nsnapshots]) - numpy.sum(nss_tf[:i+1],axis=0)
# Define the real time scale (in ps) rather than a snapshot sequence.
ts = ["%.1f" % ((i-(i!=tf[0]))*P.snap_size[0] + P.equiltime) for i in tf]
# Initialize arrays to store data points to be plotted.
F_df = numpy.zeros(n_tf-1, float)
F_ddf = numpy.zeros(n_tf-1, float)
R_df = numpy.zeros(n_tf-1, float)
R_ddf = numpy.zeros(n_tf-1, float)
# Store the MBAR energy that accounts for all the equilibrated snapshots (has already been computed in the previous section).
F_df[-1], F_ddf[-1] = (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
R_df[0], R_ddf[0] = (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
# Do the forward analysis.
print "Forward dF(t) analysis...\nEstimating the free energy change using the data up to"
sta = nss_tf[0]
for i in range(n_tf-2):
print "%60s ps..." % ts[i+1]
fin = numpy.sum(nss_tf[:i+2],axis=0)
N_k, u_kln = uncorrelate(nss_tf[0], fin)
F_df[i], F_ddf[i] = estimatewithMBAR(u_kln, N_k, P.relative_tolerance)
# Do the reverse analysis.
print "Reverse dF(t) analysis...\nUsing the data starting from"
fin = numpy.sum(nss_tf[:],axis=0)
for i in range(n_tf-2):
print "%34s ps..." % ts[i+1]
sta = numpy.sum(nss_tf[:i+2],axis=0)
N_k, u_kln = uncorrelate(sta, fin)
R_df[i+1], R_ddf[i+1] = estimatewithMBAR(u_kln, N_k, P.relative_tolerance)
print """\n The free energies %s evaluated by using the trajectory
snapshots corresponding to various time intervals for both the
reverse and forward (in parentheses) direction.\n""" % P.units
print "%s\n %20s %19s %20s\n%s" % (70*'-', 'Time interval, ps','Reverse', 'Forward', 70*'-')
print "%10s -- %s\n%10s -- %-10s %11.3f +- %5.3f %16s\n" % (ts[0], ts[-1], '('+ts[0], ts[0]+')', R_df[0], R_ddf[0], 'XXXXXX')
for i in range(1, len(ts)-1):
print "%10s -- %s\n%10s -- %-10s %11.3f +- %5.3f %11.3f +- %5.3f\n" % (ts[i], ts[-1], '('+ts[0], ts[i]+')', R_df[i], R_ddf[i], F_df[i-1], F_ddf[i-1])
print "%10s -- %s\n%10s -- %-10s %16s %15.3f +- %5.3f\n%s" % (ts[-1], ts[-1], '('+ts[0], ts[-1]+')', 'XXXXXX', F_df[-1], F_ddf[-1], 70*'-')
# Plot the forward and reverse dF(t); store the data points in the text file.
print "Plotting data to the file dF_t.pdf...\n\n"
plotdFvsTime([float(i) for i in ts[1:]], [float(i) for i in ts[:-1]], F_df, R_df, F_ddf, R_ddf)
outtext = ["%12s %10s %-10s %17s %10s %s\n" % ('Time (ps)', 'Forward', P.units, 'Time (ps)', 'Reverse', P.units)]
outtext+= ["%10s %11.3f +- %5.3f %18s %11.3f +- %5.3f\n" % (ts[1:][i], F_df[i], F_ddf[i], ts[:-1][i], R_df[i], R_ddf[i]) for i in range(len(F_df))]
outfile = open(os.path.join(P.output_directory, 'dF_t.txt'), 'w'); outfile.writelines(outtext); outfile.close()
return
#===================================================================================================
# FUNCTIONS: Free energy change breakdown (into lambda-pair dFs). Called by the -g flag.
#===================================================================================================
def plotdFvsLambda():
def plotdFvsLambda1():
"""Plots the free energy differences evaluated for each pair of adjacent states for all methods."""
x = numpy.arange(len(df_allk))
if x[-1]<8:
fig = pl.figure(figsize = (8,6))
else:
fig = pl.figure(figsize = (len(x),6))
width = 1./(len(P.methods)+1)
elw = 30*width
colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
lines = tuple()
for name in P.methods:
y = [df_allk[i][name]/P.beta_report for i in x]
ye = [ddf_allk[i][name]/P.beta_report for i in x]
line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.1*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
lines += (line[0],)
pl.xlabel('States', fontsize=12, color='#151B54')
pl.ylabel('$\Delta G$ '+P.units, fontsize=12, color='#151B54')
pl.xticks(x+0.5*width*len(P.methods), tuple(['%d--%d' % (i, i+1) for i in x]), fontsize=8)
pl.yticks(fontsize=8)
pl.xlim(x[0], x[-1]+len(lines)*width)
ax = pl.gca()
for dir in ['right', 'top', 'bottom']:
ax.spines[dir].set_color('none')
ax.yaxis.set_ticks_position('left')
for tick in ax.get_xticklines():
tick.set_visible(False)
leg = pl.legend(lines, tuple(P.methods), loc=3, ncol=2, prop=FP(size=10), fancybox=True)
leg.get_frame().set_alpha(0.5)
pl.title('The free energy change breakdown', fontsize = 12)
pl.savefig(os.path.join(P.output_directory, 'dF_state_long.pdf'), bbox_inches='tight')
pl.close(fig)
return
def plotdFvsLambda2(nb=10):
"""Plots the free energy differences evaluated for each pair of adjacent states for all methods.
The layout is approximately 'nb' bars per subplot."""
x = numpy.arange(len(df_allk))
if len(x) < nb:
return
xs = numpy.array_split(x, len(x)/nb+1)
mnb = max([len(i) for i in xs])
fig = pl.figure(figsize = (8,6))
width = 1./(len(P.methods)+1)
elw = 30*width
colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
ndx = 1
for x in xs:
lines = tuple()
ax = pl.subplot(len(xs), 1, ndx)
for name in P.methods:
y = [df_allk[i][name]/P.beta_report for i in x]
ye = [ddf_allk[i][name]/P.beta_report for i in x]
line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
lines += (line[0],)
for dir in ['left', 'right', 'top', 'bottom']:
if dir == 'left':
ax.yaxis.set_ticks_position(dir)
else:
ax.spines[dir].set_color('none')
pl.yticks(fontsize=10)
ax.xaxis.set_ticks([])
for i in x+0.5*width*len(P.methods):
ax.annotate('$\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')
pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))
ndx += 1
leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\mathrm{\Delta G\/%s\/}\mathit{vs.}\/\mathrm{lambda\/pair}$' % P.units, fancybox=True)
leg.get_frame().set_alpha(0.5)
pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')
pl.close(fig)
return
def plotTI():
"""Plots the ave_dhdl array as a function of the lambda value.
If (TI and TI-CUBIC in methods) -- plots the TI integration area and the TI-CUBIC interpolation curve,
elif (only one of them in methods) -- plots the integration area of the method."""
min_dl = dlam[dlam != 0].min()
S = int(0.4/min_dl)
fig = pl.figure(figsize = (8,6))
ax = fig.add_subplot(1,1,1)
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for k, spine in ax.spines.items():
spine.set_zorder(12.2)
xs, ndx, dx = [0], 0, 0.001
colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']
min_y, max_y = 0, 0
lines = tuple()
## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper
lv_names2 = []
for j in range(n_components):
y = ave_dhdl[:,j]
if not (y == 0).all():
lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())
for j in range(n_components):
y = ave_dhdl[:,j]
if not (y == 0).all():
#if not cubspl[j] == 0:
# Get the coordinates.
lj = lchange[:,j]
x = lv[:,j][lj]
y = y[lj]/P.beta_report
if 'TI' in P.methods:
# Plot the TI integration area.
ss = 'TI'
for i in range(len(x)-1):
min_y = min(y.min(), min_y)
max_y = max(y.max(), max_y)
#pl.plot(x,y)
if i%2==0:
pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)
else:
pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)
xlegend = [-100*wnum for wnum in range(len(lv_names2))]
pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper
if 'TI-CUBIC' in P.methods:
# Plot the TI-CUBIC interpolation curve.
ss += ' and TI-CUBIC'
xnew = numpy.arange(0, 1+dx, dx)
ynew = cubspl[j].interpolate(y, xnew)
min_y = min(ynew.min(), min_y)
max_y = max(ynew.max(), max_y)
pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)
else:
# Plot the TI-CUBIC integration area.
ss = 'TI-CUBIC'
for i in range(len(x)-1):
xnew = numpy.arange(x[i], x[i+1]+dx, dx)
ynew = cubspl[j].interpolate(y, xnew)
ynew[0], ynew[-1] = y[i], y[i+1]
min_y = min(ynew.min(), min_y)
max_y = max(ynew.max(), max_y)
if i%2==0:
pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)
else:
pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)
# Store the abscissa values and update the subplot index.
xs += (x+ndx).tolist()[1:]
ndx += 1
# Make sure the tick labels are not overcrowded.
xs = numpy.array(xs)
dl_mat = numpy.array([xs-i for i in xs])
ri = range(len(xs))
def getInd(r, z):
primo = r[0]
min_dl = ndx*0.02*2**(primo>10)
if dl_mat[primo].max() < min_dl:
return z
for i in r:
for j in range(len(xs)):
if dl_mat[i,j] > min_dl:
z.append(j)
return getInd(ri[j:], z)
keep_ind = getInd(ri, [0]) # compute once; the old mutable default argument would grow across calls
xt = [i if (i in keep_ind) else '' for i in range(K)]
pl.xticks(xs[1:], xt[1:], fontsize=10)
pl.yticks(fontsize=10)
#ax = pl.gca()
#for label in ax.get_xticklabels():
# label.set_bbox(dict(fc='w', ec='None', alpha=0.5))
# Remove the abscissa ticks and set up the axes limits.
for tick in ax.get_xticklines():
tick.set_visible(False)
pl.xlim(0, ndx)
min_y *= 1.01
max_y *= 1.01
pl.ylim(min_y, max_y)
for i,j in zip(xs[1:], xt[1:]):
pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')
if ndx>1:
lenticks = len(ax.get_ymajorticklabels()) - 1
if min_y<0: lenticks -= 1
if lenticks < 5:
from matplotlib.ticker import AutoMinorLocator as AML
ax.yaxis.set_minor_locator(AML())
pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)
pl.ylabel(r'$\mathrm{\langle{\frac{ \partial U } { \partial \lambda }}\rangle_{\lambda}\/%s}$' % P.units, fontsize=20, color='#151B54')
pl.annotate('$\mathit{\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')
if P.software.title() != 'Sire':
lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)
for l in lege.legendHandles:
l.set_linewidth(10)
pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))
pl.close(fig)
return
print "Plotting the free energy breakdown figure..."
plotdFvsLambda1()
plotdFvsLambda2()
if ('TI' in P.methods or 'TI-CUBIC' in P.methods):
print "Plotting the TI figure..."
plotTI()
#===================================================================================================
# FUNCTIONS: The Curve-Fitting Method. Called by the -c flag.
#===================================================================================================
def plotCFM(u_kln, N_k, num_bins=100):
"""A graphical representation of what Bennett calls 'Curve-Fitting Method'."""
print "Plotting the CFM figure..."
def leaveTicksOnlyOnThe(xdir, ydir, axis):
dirs = ['left', 'right', 'top', 'bottom']
axis.xaxis.set_ticks_position(xdir)
axis.yaxis.set_ticks_position(ydir)
return
def plotdg_vs_dU(yy, df_allk, ddf_allk):
sq = (len(yy))**0.5
h = int(sq)
w = h + 1 + 1*(sq-h>0.5)
scale = round(w/3., 1)+0.4 if len(yy)>13 else 1
sf = numpy.ceil(scale*3) if scale>1 else 0
fig = pl.figure(figsize = (8*scale,6*scale))
matplotlib.rc('axes', facecolor = '#E3E4FA')
matplotlib.rc('axes', edgecolor = 'white')
if P.bSkipLambdaIndex:
ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]
ks = numpy.delete(numpy.arange(K+len(ks)), ks)
else:
ks = range(K)
for i, (xx_i, yy_i) in enumerate(yy):
ax = pl.subplot(h, w, i+1)
ax.plot(xx_i, yy_i, color='r', ls='-', lw=3, marker='o', mec='r')
leaveTicksOnlyOnThe('bottom', 'left', ax)
ax.locator_params(axis='x', nbins=5)
ax.locator_params(axis='y', nbins=6)
ax.fill_between(xx_i, df_allk[i]['BAR'] - ddf_allk[i]['BAR'], df_allk[i]['BAR'] + ddf_allk[i]['BAR'], color='#D2B9D3', zorder=-1)
ax.annotate(r'$\mathrm{%d-%d}$' % (ks[i], ks[i+1]), xy=(0.5, 0.9), xycoords=('axes fraction', 'axes fraction'), xytext=(0, -2), size=14, textcoords='offset points', va='top', ha='center', color='#151B54', bbox = dict(fc='w', ec='none', boxstyle='round', alpha=0.5))
pl.xlim(xx_i.min(), xx_i.max())
pl.annotate(r'$\mathrm{\Delta U_{i,i+1}\/(reduced\/units)}$', xy=(0.5, 0.03), xytext=(0.5, 0), xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')
pl.annotate(r'$\mathrm{\Delta g_{i+1,i}\/(reduced\/units)}$', xy=(0.06, 0.5), xytext=(0, 0.5), rotation=90, xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')
pl.savefig(os.path.join(P.output_directory, 'cfm.pdf'))
pl.close(fig)
return
def findOptimalMinMax(ar):
c = zip(*numpy.histogram(ar, bins=10))
thr = int(ar.size/8.)
mi, ma = ar.min(), ar.max()
for (i,j) in c:
if i>thr:
mi = j
break
for (i,j) in c[::-1]:
if i>thr:
ma = j
break
return mi, ma
def stripZeros(a, aa, b, bb):
z = numpy.array([a, aa[:-1], b, bb[:-1]])
til = 0
for i,j in enumerate(a):
if j>0:
til = i
break
z = z[:, til:]
til = 0
for i,j in enumerate(b[::-1]):
if j>0:
til = i
break
z = z[:, :len(a)+1-til]
a, aa, b, bb = z
return a, numpy.append(aa, 100), b, numpy.append(bb, 100)
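# The quantity assembled below follows Bennett's curve-fitting picture: in
# reduced units the forward and reverse work histograms satisfy (up to sign
# conventions) ln P_F(dU) - ln P_R(dU) = dU - df, so after shifting the two
# log-histograms by -dU/2 and +dU/2 their difference should be roughly flat
# and fall inside the shaded BAR band wherever the histograms overlap.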
K = len(u_kln)
yy = []
for k in range(0, K-1):
upto = min(N_k[k], N_k[k+1])
righ = -u_kln[k,k+1, : upto]
left = u_kln[k+1,k, : upto]
min1, max1 = findOptimalMinMax(righ)
min2, max2 = findOptimalMinMax(left)
mi = min(min1, min2)
ma = max(max1, max2)
(counts_l, xbins_l) = numpy.histogram(left, bins=num_bins, range=(mi, ma))
(counts_r, xbins_r) = numpy.histogram(righ, bins=num_bins, range=(mi, ma))
counts_l, xbins_l, counts_r, xbins_r = stripZeros(counts_l, xbins_l, counts_r, xbins_r)
counts_r, xbins_r, counts_l, xbins_l = stripZeros(counts_r, xbins_r, counts_l, xbins_l)
with numpy.errstate(divide='ignore', invalid='ignore'):
log_left = numpy.log(counts_l) - 0.5*xbins_l[:-1]
log_righ = numpy.log(counts_r) + 0.5*xbins_r[:-1]
diff = log_left - log_righ
yy.append((xbins_l[:-1], diff))
plotdg_vs_dU(yy, df_allk, ddf_allk)
return
#===================================================================================================
# MAIN
#===================================================================================================
def main():
global dhdlt
global u_klt
global P
global K
global n_components
global pymbar
global dhdl
global N_k
global lv
global dlam
global ave_dhdl
global std_dhdl
global lchange
global cubspl
global mapl
global u_kln
global Deltaf_ij
global dDeltaf_ij
global df_allk
global ddf_allk
global nsnapshots
global pl
global FP
global matplotlib
# Timing.
stime = ttt_time.time()
print "Started on %s" % ttt_time.asctime()
print 'Command line was: %s\n' % ' '.join(sys.argv)
# Simulation profile P (to be stored in 'results.pickle') will amass information about the simulation.
P = parser.parse_args()[0]
P.methods = getMethods(P.methods.upper())
P.units, P.beta, P.beta_report = checkUnitsAndMore(P.units)
if ''.join(P.methods).replace('TI-CUBIC', '').replace('TI', ''):
import pymbar ## this is not a built-in module ##
if (numpy.array([P.bForwrev, P.breakdown, P.bCFM, P.overlap]) != 0).any():
import matplotlib # 'matplotlib-1.1.0-1'; errors may pop up when using an earlier version
matplotlib.use('Agg')
import matplotlib.pyplot as pl
from matplotlib.font_manager import FontProperties as FP
if P.software.title() == 'Gromacs':
import parser_gromacs
nsnapshots, lv, dhdlt, u_klt = parser_gromacs.readDataGromacs(P)
elif P.software.title() == 'Sire':
import parser_sire
nsnapshots, lv, dhdlt, u_klt = parser_sire.readDataSire(P)
elif P.software.title() == 'Amber':
import parser_amber
nsnapshots, lv, dhdlt, u_klt = parser_amber.readDataAmber(P)
elif P.software.title() == 'Desmond':
import parser_desmond
#NML: Desmond FEP jobs will always output with these names
P.prefix='gibbs'
P.suffix='dE'
P.methods=['BAR'] # given only dE of adjacent lambdas, limit the analysis to BAR
nsnapshots, lv, u_klt = parser_desmond.readDataDesmond(P)
else:
from inspect import currentframe, getframeinfo
lineno = getframeinfo(currentframe()).lineno
print "\n\n%s\n Looks like there is no yet proper parser to process your files. \n Please modify lines %d and %d of this script.\n%s\n\n" % (78*"*", lineno+3, lineno+4, 78*"*")
#### LINES TO BE MODIFIED
#import YOUR_OWN_FILE_PARSER
#nsnapshots, lv, dhdlt, u_klt = YOUR_OWN_FILE_PARSER.yourDataParser(*args, **kwargs)
#### All four are numpy arrays.
#### lv is the array of lambda vectors.
#### nsnapshots is the number of equilibrated snapshots for each state.
#### dhdlt[k,n,t] is the derivative of energy component n with respect to state k of snapshot t.
#### u_klt[k,m,t] is the reduced potential energy of snapshot t of state k evaluated at state m.
K, n_components = lv.shape
if (numpy.array(['Sire','Gromacs', 'Amber']) == P.software.title()).any():
dhdl, N_k, u_kln = uncorrelate(sta=numpy.zeros(K, int), fin=nsnapshots, do_dhdl=True)
elif P.software.title() == 'Desmond':
N_k, u_kln = uncorrelate(sta=numpy.zeros(K, int), fin=nsnapshots, do_dhdl=False)
# Estimate free energy difference with MBAR -- all states at once.
if 'MBAR' in P.methods:
Deltaf_ij, dDeltaf_ij = estimatewithMBAR(u_kln, N_k, P.relative_tolerance, regular_estimate=True)
# The TI preliminaries.
if ('TI' in P.methods or 'TI-CUBIC' in P.methods):
lchange, dlam, ave_dhdl, std_dhdl = TIprelim(lv)
if 'TI-CUBIC' in P.methods:
cubspl, mapl = getSplines(lchange)
# Call other methods. Print stats. Store results.
df_allk, ddf_allk = estimatePairs()
totalEnergies()
# Plot figures.
if P.bForwrev:
dF_t()
if P.breakdown:
plotdFvsLambda()
if P.bCFM:
if not (u_kln is None):
plotCFM(u_kln, N_k, 50)
print "\nTime spent: %s hours, %s minutes, and %s seconds.\nFinished on %s" % timeStatistics(stime)
if __name__ == "__main__":
main()
#===================================================================================================
# End of the script
#===================================================================================================
| lgpl-2.1 |
elingg/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 11 | 44167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes(
[axis] + list(labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(array_ops.gather(transposed.tensor, indexer),
temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, tc.Union(
slice, collections.Hashable, collections.Sequence)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
else:
# We're allowing anything NumPy treats as a scalar or 1D array.
value = np.asarray(value)
if value.ndim == 0:
slices[axis_name] = axis.index(value.item())
elif value.ndim == 1:
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value.tolist()]
indexers[axis_name] = ops.convert_to_tensor(
indexer, dtype=dtypes.int64)
else:
raise NotImplementedError(
'select does not yet support selections with more than one '
'dimension: %s on axis %r' % (value, axis_name))
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
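# Illustrative usage (a sketch; assumes a LabeledTensor `lt` with an axis
# named 'channel' whose labels are ['red', 'green', 'blue']):
#   red = select(lt, {'channel': 'red'})                 # scalar selection
#   rg = select(lt, {'channel': slice('red', 'green')})  # inclusive slice
#   rb = select(lt, {'channel': ['red', 'blue']})        # list selection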
@tc.returns(core.LabeledTensor)
@tc.accepts(tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
ValueError: If no input tensors are provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [core.convert_to_labeled_tensor(lt)
for lt in labeled_tensors]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
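# Illustrative usage (a sketch; assumes `lt_a` and `lt_b` share all axes
# except possibly the labels/size of 'batch'):
#   combined = concat([lt_a, lt_b], 'batch')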
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike),
int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
ValueError: If no input tensors are provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [core.convert_to_labeled_tensor(lt)
for lt in labeled_tensors]
if len(labeled_tensors) < 1:
raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike, tc.Optional(string_types),
tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values())
if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
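# pack and unpack are near-inverses (a sketch; assumes `lt` has a first axis
# named 'batch'; note the labels on 'batch' are not restored by pack):
#   pieces = unpack(lt, 'batch')      # list of LabeledTensors
#   restored = pack(pieces, 'batch')  # new 'batch' axis at position 0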
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
# produced with the wrong order. (The latter will occur with some frequency
# because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
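# Illustrative usage (a sketch; assumes `lt` has consecutive axes 'row' and
# 'col', with hypothetical sizes 4 and 8):
#   flat = reshape(lt, ['row', 'col'], ['pixel'])  # 'pixel' size is inferred
#   back = reshape(flat, ['pixel'], [('row', 4), ('col', 8)])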
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [core.convert_to_labeled_tensor(lt)
for lt in labeled_tensors]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(labeled_tensor.tensor,
shape,
seed=seed,
name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(tc.Collection(string_types)),
tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(labeled_tensor.tensor,
squeeze_dimensions,
name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
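# Illustrative usage (a sketch; assumes `lt` carries a size-1 axis named
# 'channel'):
#   squeezed = squeeze(lt, ['channel'])  # drops only 'channel'
#   minimal = squeeze(lt)                # drops every size-1 axis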
# pylint: disable=invalid-name
ReduceAxis = tc.Union(
string_types, tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
eventually support summing over any number of shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
raise ValueError('there must be exactly one axis in common between '
'the inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [name for name in axis_scope_order
if name in result_axis_names]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(a_tensor,
b_tensor,
transpose_a=transpose_a,
transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
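# Illustrative usage sketch (hypothetical helper, not part of the original
# module): a rank-2 x rank-1 product contracts over the single shared axis
# 'cols', leaving only the non-shared axis.
def _example_matmul():
  m = core.LabeledTensor(array_ops.ones((2, 3)), ['rows', 'cols'])
  v = core.LabeledTensor(array_ops.ones((3,)), ['cols'])
  return matmul(m, v)  # LabeledTensor with axes ['rows']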
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Each axis may be a string, in which case that dimension will be
removed, or a pair of (name, None) or (name, label), in which case
that dimension will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(labeled_tensor.tensor,
reduction_dimensions,
keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
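# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the generated reductions take axis names rather than integer
# dimensions, and string axes are squeezed out of the result.
def _example_reduce_sum():
  x = core.LabeledTensor(array_ops.ones((2, 3)), ['rows', 'cols'])
  return reduce_sum(x, 'cols')  # 'cols' is squeezed; axes -> ['rows']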
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
integer number of times to tile along that axis. Only axes with a multiple
different from 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [name for name in multiples
if labeled_tensor.axes[name].labels is not None]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()]
return core.LabeledTensor(tile_op, new_axes)
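# Illustrative usage sketch (hypothetical helper, not part of the original
# module): only unlabeled axes may be tiled, keyed by axis name.
def _example_tile():
  x = core.LabeledTensor(array_ops.ones((2, 3)), ['batch', 'channel'])
  return tile(x, {'batch': 3})  # 'batch' grows from 2 to 6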
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(
labeled_tensor.tensor, padding_pairs, mode, name=scope)
return core.LabeledTensor(pad_op, new_axes)
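# Illustrative usage sketch (hypothetical helper, not part of the original
# module): paddings are keyed by axis name as (before, after) pairs.
def _example_pad():
  x = core.LabeledTensor(array_ops.ones((3,)), ['x'])
  return pad(x, {'x': (1, 2)})  # 'x' grows from 3 to 6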
@tc.returns(core.LabeledTensor)
@tc.accepts(tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(tc.Union(
core.Axes,
tc.Collection(tc.Union(string_types, core.AxisLike)))),
tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
The labeled tensor with the given constant value.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
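# Illustrative usage sketch (hypothetical helper, not part of the original
# module): when axes are given as strings, the shape is inferred from value.
def _example_constant():
  return constant([[1, 2, 3], [4, 5, 6]], axes=['row', 'col'])  # shape (2, 3)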
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType),
tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType),
tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType),
tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
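# Illustrative usage sketch (hypothetical helper, not part of the original
# module): cast changes the dtype while keeping the labeled axes intact;
# zeros_like and ones_like above follow the same pattern.
def _example_cast():
  x = constant([1, 2], axes=['n'])
  return cast(x, dtypes.float32)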
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(labeled_tensor.tensor,
msg=message,
name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
mask: A 1D boolean LabeledTensor to apply along the first axis.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
ValueError: if the first axis of the mask does not match the first axis
of `labeled_tensor`.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
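# Illustrative usage sketch (hypothetical helper, not part of the original
# module): the mask must share the first axis; that axis keeps its name in
# the result but loses any tick labels.
def _example_boolean_mask():
  x = core.LabeledTensor(array_ops.ones((3, 2)), ['batch', 'feature'])
  keep = constant([True, False, True], axes=['batch'])
  return boolean_mask(x, keep)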
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
ValueError: if `condition`, `x`, and `y` do not all have identical axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
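# Illustrative usage sketch (hypothetical helper, not part of the original
# module): element-wise select; all three inputs must carry identical axes.
def _example_where():
  cond = constant([True, False], axes=['n'])
  x = constant([1, 2], axes=['n'])
  y = constant([10, 20], axes=['n'])
  return where(cond, x, y)  # elements: [1, 20]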
| apache-2.0 |
kaiserroll14/301finalproject | main/pandas/tseries/offsets.py | 9 | 87012 | from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.tseries.tools import to_datetime, normalize_date
from pandas.tseries.timedeltas import to_timedelta
from pandas.core.common import ABCSeries, ABCDatetimeIndex
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta
import functools
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd','CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'BusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def as_datetime(obj):
f = getattr(obj,'to_pydatetime',None)
if f is not None:
obj = f()
return obj
def apply_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
if other is tslib.NaT:
return tslib.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = tslib.tz_convert_single(result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = normalize_date(result)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
return result
return wrapper
def apply_index_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
result = func(self, other)
if self.normalize:
result = result.to_period('D').to_timestamp()
return result
return wrapper
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0
or dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
return False
return True
#----------------------------------------------------------------------
# DateOffset
class ApplyTypeError(TypeError):
# sentinel class for catching the apply error to return NotImplemented
pass
class CacheableOffset(object):
_cacheable = True
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
Works exactly like relativedelta in terms of the keyword args you
pass in. Use of the keyword n is discouraged; you would be better
off specifying n in the keywords you use, but regardless it is
there for you. n is needed for DateOffset subclasses.
DateOffsets work as follows. Each offset specifies a set of dates
that conform to the DateOffset. For example, Bday defines this
set to be the set of dates that are weekdays (M-F). To test if a
date is in the set of a DateOffset dateOffset we can use the
onOffset method: dateOffset.onOffset(date).
If a date is not a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
valid date, first it is moved to a valid date. Thus the pseudocode
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
When a date offset is created for a negative number of periods,
the date is first rolled forward. The pseudo code is:
def __add__(date):
date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
"""
_cacheable = False
_normalize_cache = True
_kwds_use_relativedelta = (
'years', 'months', 'weeks', 'days',
'year', 'month', 'week', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond'
)
_use_relativedelta = False
_adjust_dst = False
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self._offset, self._use_relativedelta = self._determine_offset()
def _determine_offset(self):
# timedelta is used for sub-daily plural offsets and all singular offsets
# relativedelta is used for plural offsets of daily length or more
# nanosecond(s) are handled by apply_wraps
kwds_no_nanos = dict(
(k, v) for k, v in self.kwds.items()
if k not in ('nanosecond', 'nanoseconds')
)
use_relativedelta = False
if len(kwds_no_nanos) > 0:
if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):
use_relativedelta = True
offset = relativedelta(**kwds_no_nanos)
else:
# sub-daily offset - use timedelta (tz-aware)
offset = timedelta(**kwds_no_nanos)
else:
offset = timedelta(1)
return offset, use_relativedelta
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = tslib._localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation
.. versionadded:: 0.17.0
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if not type(self) is DateOffset:
raise NotImplementedError("DateOffset subclass %s "
"does not have a vectorized "
"implementation"
% (self.__class__.__name__,))
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(self.kwds).issubset(relativedelta_fast)):
months = ((self.kwds.get('years', 0) * 12
+ self.kwds.get('months', 0)) * self.n)
if months:
shifted = tslib.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (self.kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + i.to_perioddelta('W')
timedelta_kwds = dict((k,v) for k,v in self.kwds.items()
if k in ['days','hours','minutes',
'seconds','microseconds'])
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) %s not able to be "
"applied vectorized" %
(set(self.kwds) - relativedelta_fast),)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, normalize=self.normalize, **self.kwds)
def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
exclude = ['kwds', 'name','normalize', 'calendar']
attrs = [(k, v) for k, v in all_paras.items() if (k not in exclude ) and (k[0] != '_')]
attrs = sorted(set(attrs))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
if ((attr == 'kwds' and len(self.kwds) == 0)
or attr.startswith('_')):
continue
elif attr == 'kwds':
kwds_new = {}
for key in self.kwds:
if not hasattr(self, key):
kwds_new[key] = self.kwds[key]
if len(kwds_new) > 0:
attrs.append('='.join((attr, repr(kwds_new))))
else:
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
@property
def name(self):
if hasattr(self, '_named'):
return self._named
else:
return self.rule_code
def __eq__(self, other):
if other is None:
return False
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if not isinstance(other, DateOffset):
return False
return self._params() == other._params()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset.')
elif type(other) == type(self):
return self.__class__(self.n - other.n, normalize=self.normalize, **self.kwds)
else: # pragma: no cover
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other - self
return self.__class__(-self.n, normalize=self.normalize, **self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, normalize=self.normalize, **self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, normalize=self.normalize, **self.kwds)
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# helpers for vectorized offsets
def _beg_apply_index(self, i, freq):
"""Offsets index to beginning of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n < 0:
# when subtracting, dates on start roll to prior
roll = np.where(base_period.to_timestamp() == i - off,
self.n, self.n + 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp()
return base + off
def _end_apply_index(self, i, freq):
"""Offsets index to end of Period frequency"""
off = i.to_perioddelta('D')
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n > 0:
# when adding, dates on end roll to next
roll = np.where(base_period.to_timestamp(how='end') == i - off,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
return fstr
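# Illustrative usage sketch (hypothetical helper, not part of pandas):
# DateOffset keywords behave like dateutil.relativedelta, and subtraction
# applies the negated offset via __rsub__, as described in the class
# docstring above.
def _example_dateoffset():
    ts = Timestamp('2014-01-31')
    # months=1 clamps to the shorter month's end, relativedelta-style
    assert (ts + DateOffset(months=1)) == Timestamp('2014-02-28')
    assert (ts - DateOffset(days=1)) == Timestamp('2014-01-30')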
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix %s" % suffix)
return cls()
class BusinessMixin(object):
""" mixin to business types to provide related functions """
# TODO: Combine this with DateOffset by defining a whitelisted set of
# attributes on each object rather than the existing behavior of iterating
# over internal ``__dict__``
def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', self.__class__.__name__)
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
return out
def _repr_attrs(self):
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
if self.offset:
fstr += self._offset_str()
return fstr
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
# avoid slowness below
if abs(n) > 5:
k = n // 5
result = result + timedelta(7 * k)
if n < 0 and result.weekday() > 4:
n += 1
n -= 5 * k
if n == 0 and result.weekday() > 4:
n -= 1
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
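# Illustrative usage sketch (hypothetical helper, not part of pandas):
# rollback/rollforward move weekend dates to the adjacent business day.
def _example_business_day():
    sat = Timestamp('2014-08-02')  # a Saturday
    assert BusinessDay().rollback(sat) == Timestamp('2014-08-01')    # Friday
    assert BusinessDay().rollforward(sat) == Timestamp('2014-08-04')  # Monday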
class BusinessHour(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business hours
.. versionadded:: 0.16.1
"""
_prefix = 'BH'
_anchor = 0
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
# must be validated here to equality check
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
# used for moving to next businessday
if self.n >= 0:
self.next_bday = BusinessDay(n=1)
else:
self.next_bday = BusinessDay(n=-1)
def _validate_time(self, t_input):
from datetime import time as dt_time
import time
if isinstance(t_input, compat.string_types):
try:
t = time.strptime(t_input, '%H:%M')
return dt_time(hour=t.tm_hour, minute=t.tm_min)
except ValueError:
raise ValueError("time data must match '%H:%M' format")
elif isinstance(t_input, dt_time):
if t_input.second != 0 or t_input.microsecond != 0:
raise ValueError("time data must be specified only with hour and minute")
return t_input
else:
raise ValueError("time data must be string or datetime.time")
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHour, self)._repr_attrs()
attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'),
self.end.strftime('%H:%M'))]
out += ': ' + ', '.join(attrs)
return out
def _next_opening_time(self, other):
"""
If n is positive, return tomorrow's business day opening time.
Otherwise, return yesterday's business day opening time.
The opening time always falls on a BusinessDay; the closing time
may not if business hours extend over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
If n is positive, return yesterday's business day opening time.
Otherwise, return tomorrow's business day opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _get_business_hours_by_sec(self):
"""
Return the length of the business day, in seconds.
"""
if self._get_daytime_flag():
# create dummy datetime to calculate business hours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
else:
self.daytime = False
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec()
if self.n >= 0:
dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
# calculate here because offset is not immutable
daytime = self._get_daytime_flag()
businesshours = self._get_business_hours_by_sec()
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
# other may be a Timestamp, thus do not use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
# midnight business hour may not be on BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
# calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
result = self._next_opening_time(result - timedelta(seconds=1)) + bhdelta
return result
else:
raise ApplyTypeError('Only know how to combine business hour with datetime.')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
businesshours = self._get_business_hours_by_sec()
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
Slight speedups using calculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = tslib.tot_seconds(dt - op)
if span <= businesshours:
return True
else:
return False
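# Illustrative usage sketch (hypothetical helper, not part of pandas):
# an hour added at Friday 16:30 spills past the default 17:00 close and
# lands in Monday's opening hours, per the spill-over adjustment in
# apply() above.
def _example_business_hour():
    ts = Timestamp('2014-08-01 16:30')  # Friday afternoon
    assert (ts + BusinessHour()) == Timestamp('2014-08-04 09:30')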
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
excluding holidays
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'C'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
calendar, holidays = self.get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
def get_calendar(self, weekmask, holidays, calendar):
'''Generate busdaycalendar'''
if isinstance(calendar, np.busdaycalendar):
if not holidays:
holidays = tuple(calendar.holidays)
elif not isinstance(holidays, tuple):
holidays = tuple(holidays)
else:
# trust that calendar.holidays and holidays are
# consistent
pass
return calendar, holidays
if holidays is None:
holidays = []
try:
holidays = holidays + calendar.holidays().tolist()
except AttributeError:
pass
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
holidays = tuple(sorted(holidays))
kwargs = {'weekmask': weekmask}
if holidays:
kwargs['holidays'] = holidays
try:
busdaycalendar = np.busdaycalendar(**kwargs)
except:
# Check we have the required numpy version
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise NotImplementedError("CustomBusinessDay requires numpy >= "
"1.7.0. Current version: " +
np.__version__)
else:
raise
return busdaycalendar, holidays
def __getstate__(self):
"""Return a pickleable state"""
state = self.__dict__.copy()
del state['calendar']
# we don't want to actually pickle the calendar object
# as it's an np.busdaycalendar; we recreate it on deserialization
try:
state['kwds'].pop('calendar')
except:
pass
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
calendar, holidays = self.get_calendar(weekmask=self.weekmask,
holidays=self.holidays,
calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
# Currently
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
if getattr(dt, 'tzinfo', None) is not None:
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
dt = Timestamp(dt)
dt = np.datetime64(dt)
if dt.dtype.name != dtype:
dt = dt.astype(dtype)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = self._to_dt64(dt,'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
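# Illustrative usage sketch (hypothetical helper, not part of pandas):
# user-supplied holidays are folded into the numpy busdaycalendar.
def _example_custom_business_day():
    cday = CustomBusinessDay(holidays=['2014-07-04'])
    # Thursday + 1 skips the holiday Friday and the weekend
    assert (Timestamp('2014-07-03') + cday) == Timestamp('2014-07-07')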
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
@property
def name(self):
if self.isAnchored():
return self.rule_code
else:
return "%s-%s" % (self.rule_code, _int_to_month[self.n])
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
@apply_wraps
def apply(self, other):
n = self.n
_, days_in_month = tslib.monthrange(other.year, other.month)
if other.day != days_in_month:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
months = self.n - 1 if self.n >= 0 else self.n
shifted = tslib.shift_months(i.asi8, months, 'end')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = tslib.monthrange(dt.year, dt.month)[1]
return dt.day == days_in_month
_prefix = 'M'
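# Illustrative usage sketch (hypothetical helper, not part of pandas):
# MonthEnd anchors to month ends, clamping short months.
def _example_month_end():
    assert (Timestamp('2014-01-15') + MonthEnd()) == Timestamp('2014-01-31')
    assert (Timestamp('2014-01-15') + MonthEnd(2)) == Timestamp('2014-02-28')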
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
return other + relativedelta(months=n, day=1)
@apply_index_wraps
def apply_index(self, i):
months = self.n + 1 if self.n < 0 else self.n
shifted = tslib.shift_months(i.asi8, months, 'start')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == 1
_prefix = 'MS'
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
_prefix = 'BM'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
if other.day > first and n <= 0:
# as if rolled forward already
n += 1
elif other.day < first and n > 0:
other = other + timedelta(days=first - other.day)
n -= 1
other = other + relativedelta(months=n)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first, other.hour, other.minute,
other.second, other.microsecond)
return result
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
first_weekday, _ = tslib.monthrange(dt.year, dt.month)
if first_weekday == 5:
return dt.day == 3
elif first_weekday == 6:
return dt.day == 2
else:
return dt.day == 1
_prefix = 'BMS'
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBM'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
n = self.n
# First move to month offset
cur_mend = self.m_offset.rollforward(other)
# Find this custom month offset
cur_cmend = self.cbday.rollback(cur_mend)
# handle zero case. arbitrarily rollforward
if n == 0 and other != cur_cmend:
n += 1
if other < cur_cmend and n >= 1:
n -= 1
elif other > cur_cmend and n <= -1:
n += 1
new = cur_mend + n * self.m_offset
result = self.cbday.rollback(new)
return result
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBMS'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
n = self.n
dt_in = other
# First move to month offset
cur_mbegin = self.m_offset.rollback(dt_in)
# Find this custom month offset
cur_cmbegin = self.cbday.rollforward(cur_mbegin)
# handle zero case. arbitrarily rollforward
if n == 0 and dt_in != cur_cmbegin:
n += 1
if dt_in > cur_cmbegin and n <= -1:
n += 1
elif dt_in < cur_cmbegin and n >= 1:
n -= 1
new = cur_mbegin + n * self.m_offset
result = self.cbday.rollforward(new)
return result
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds.get('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
base = other
if self.weekday is None:
return other + self.n * self._inc
if self.n > 0:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
k = k - 1
other = other
for i in range(k):
other = other + self._inc
else:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
for i in range(-k):
other = other - self._inc
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return (i.to_period('W') + self.n).to_timestamp() + i.to_perioddelta('W')
else:
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() == self.weekday
_prefix = 'W'
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
suffix = '-%s' % (_int_to_weekday[self.weekday])
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
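# Illustrative usage sketch (hypothetical helper, not part of pandas):
# an anchored Week moves to the next occurrence of the requested weekday.
def _example_week():
    wed = Timestamp('2014-08-06')  # a Wednesday
    assert (wed + Week(weekday=0)) == Timestamp('2014-08-11')  # next Monday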
class WeekDay(object):
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
_int_to_weekday = {
WeekDay.MON: 'MON',
WeekDay.TUE: 'TUE',
WeekDay.WED: 'WED',
WeekDay.THU: 'THU',
WeekDay.FRI: 'FRI',
WeekDay.SAT: 'SAT',
WeekDay.SUN: 'SUN'
}
_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())
class WeekOfMonth(DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}
0 is 1st week of month, 1 2nd week, etc.
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
self.week = kwds['week']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
raise ValueError('Week must be 0<=week<=3, got %d' %
self.week)
self.kwds = kwds
@apply_wraps
def apply(self, other):
base = other
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
other = self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
other = datetime(other.year, other.month, other.day, base.hour,
base.minute, base.second, base.microsecond)
return other
def getOffsetOfMonth(self, dt):
w = Week(weekday=self.weekday)
d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
d = w.rollforward(d)
for i in range(self.week):
d = w.apply(d)
return d
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
return d == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%d%s' % (self._prefix, self.week + 1,
_int_to_weekday.get(self.weekday, ''))
_prefix = 'WOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = _weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
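# Illustrative usage sketch (hypothetical helper, not part of pandas):
# week=1, weekday=1 reads as "the second Tuesday of each month".
def _example_week_of_month():
    wom = WeekOfMonth(week=1, weekday=1)
    assert (Timestamp('2014-08-01') + wom) == Timestamp('2014-08-12')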
class LastWeekOfMonth(DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of each month"
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self.kwds = kwds
@apply_wraps
def apply(self, other):
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
return self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
def getOffsetOfMonth(self, dt):
m = MonthEnd()
d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
dt.second, dt.microsecond, tzinfo=dt.tzinfo)
eom = m.rollforward(d)
w = Week(weekday=self.weekday)
return w.rollback(eom)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))
_prefix = 'LWOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
#: default month for __init__
_default_startingMonth = None
#: default month in _from_name
_from_name_startingMonth = None
_adjust_dst = True
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth',
self._default_startingMonth)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = _month_to_int[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
# 'BQ'
_from_name_startingMonth = 12
_prefix = 'BQ'
@apply_wraps
def apply(self, other):
n = self.n
base = other
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
n = n - 1
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
other = tslib._localize_pydatetime(other, base.tzinfo)
if other.weekday() > 4:
other = other - BDay()
return other
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return BMonthEnd().onOffset(dt) and modMonth == 0
_int_to_month = tslib._MONTH_ALIASES
_month_to_int = dict((v, k) for k, v in _int_to_month.items())
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
monthsSince = monthsSince - 3
# roll forward if on same month later than first bday
if n <= 0 and (monthsSince == 0 and other.day > first):
n = n + 1
# pretend to roll back if on same month but before firstbday
elif n > 0 and (monthsSince == 0 and other.day < first):
n = n - 1
# get the first bday for result
other = other + relativedelta(months=3 * n - monthsSince)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute, other.second,
other.microsecond)
return result
class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth', 3)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
n = n - 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return MonthEnd().onOffset(dt) and modMonth == 0
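# --- Illustrative sketch (not part of the original module) ---
# QuarterEnd anchors on calendar month ends instead of business days:
# >>> datetime(2007, 1, 15) + QuarterEnd(startingMonth=3)          # -> 2007-03-31
# >>> QuarterEnd(startingMonth=3).onOffset(datetime(2007, 6, 30))  # -> True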
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0:
# make sure you roll forward, so negate
monthsSince = monthsSince - 3
if n < 0 and (monthsSince == 0 and other.day > 1):
# after start, so come back an extra period as if rolled forward
n = n + 1
other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
freqstr = 'Q-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.month = kwds.get('month', self._default_month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
DateOffset.__init__(self, n=n, normalize=normalize, **kwds)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = _month_to_int[suffix]
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
lastBDay = (days_in_month -
max(((wkday + days_in_month - 1) % 7) - 4, 0))
years = n
if n > 0:
if (other.month < self.month or
(other.month == self.month and other.day < lastBDay)):
years -= 1
elif n <= 0:
if (other.month > self.month or
(other.month == self.month and other.day > lastBDay)):
years += 1
other = other + relativedelta(years=years)
_, days_in_month = tslib.monthrange(other.year, self.month)
result = datetime(other.year, self.month, days_in_month,
other.hour, other.minute, other.second,
other.microsecond)
if result.weekday() > 4:
result = result - BDay()
return result
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
years = n
if n > 0: # roll back first for positive n
if (other.month < self.month or
(other.month == self.month and other.day < first)):
years -= 1
elif n <= 0: # roll forward
if (other.month > self.month or
(other.month == self.month and other.day > first)):
years += 1
# set first bday for result
other = other + relativedelta(years=years)
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
return datetime(other.year, self.month, first, other.hour,
other.minute, other.second, other.microsecond)
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
@apply_wraps
def apply(self, other):
def _increment(date):
if date.month == self.month:
_, days_in_month = tslib.monthrange(date.year, self.month)
if date.day != days_in_month:
year = date.year
else:
year = date.year + 1
elif date.month < self.month:
year = date.year
else:
year = date.year + 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _decrement(date):
year = date.year if date.month > self.month else date.year - 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _rollf(date):
if date.month != self.month or\
date.day < tslib.monthrange(date.year, date.month)[1]:
date = _increment(date)
return date
n = self.n
result = other
if n > 0:
while n > 0:
result = _increment(result)
n -= 1
elif n < 0:
while n < 0:
result = _decrement(result)
n += 1
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
# convert month anchor to annual period tuple
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
wkday, days_in_month = tslib.monthrange(dt.year, self.month)
return self.month == dt.month and dt.day == days_in_month
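# --- Illustrative sketch (not part of the original module) ---
# YearEnd steps through year ends via the nested _increment/_decrement:
# >>> datetime(2007, 5, 1) + YearEnd()          # -> 2007-12-31
# >>> datetime(2007, 5, 1) + YearEnd(month=6)   # -> 2007-06-30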
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
@apply_wraps
def apply(self, other):
def _increment(date, n):
year = date.year + n - 1
if date.month >= self.month:
year += 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _decrement(date, n):
year = date.year + n + 1
if date.month < self.month or (date.month == self.month and
date.day == 1):
year -= 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _rollf(date):
if (date.month != self.month) or date.day > 1:
date = _increment(date, 1)
return date
n = self.n
result = other
if n > 0:
result = _increment(result, n)
elif n < 0:
result = _decrement(result, n)
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
freqstr = 'A-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == 1
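# --- Illustrative sketch (not part of the original module) ---
# >>> datetime(2007, 5, 1) + YearBegin()           # -> 2008-01-01
# >>> YearBegin().onOffset(datetime(2008, 1, 1))   # -> True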
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
    such as retail, manufacturing, and parking.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
    Y is a certain month of the year.
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_suffix_prefix_last = 'L'
_suffix_prefix_nearest = 'N'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds['startingMonth']
self.weekday = kwds["weekday"]
self.variation = kwds["variation"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('%s is not a valid variation' % self.variation)
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
self._rd_forward = relativedelta(weekday=weekday_offset)
self._rd_backward = relativedelta(weekday=weekday_offset(-1))
else:
self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
def isAnchored(self):
        return (self.n == 1
                and self.startingMonth is not None
                and self.weekday is not None)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return year_end == dt or \
self.get_year_end(dt - relativedelta(months=1)) == dt
else:
return year_end == dt
@apply_wraps
def apply(self, other):
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)
cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)
next_year = tslib._localize_pydatetime(next_year, other.tzinfo)
if n > 0:
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other < prev_year:
year = other.year - 1
n -= 1
elif other < cur_year:
year = other.year
n -= 1
elif other < next_year:
year = other.year + 1
n -= 1
else:
assert False
result = self.get_year_end(datetime(year + n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second, other.microsecond)
return result
else:
n = -n
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other > next_year:
year = other.year + 1
n -= 1
elif other > cur_year:
year = other.year
n -= 1
elif other > prev_year:
year = other.year - 1
n -= 1
else:
assert False
result = self.get_year_end(datetime(year - n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second, other.microsecond)
return result
def get_year_end(self, dt):
if self.variation == "nearest":
return self._get_year_end_nearest(dt)
else:
return self._get_year_end_last(dt)
def get_target_month_end(self, dt):
target_month = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
next_month_first_of = target_month + relativedelta(months=+1)
return next_month_first_of + relativedelta(days=-1)
def _get_year_end_nearest(self, dt):
target_date = self.get_target_month_end(dt)
if target_date.weekday() == self.weekday:
return target_date
else:
forward = target_date + self._rd_forward
backward = target_date + self._rd_backward
if forward - target_date < target_date - backward:
return forward
else:
return backward
def _get_year_end_last(self, dt):
current_year = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
return current_year + self._offset_lwom
@property
def rule_code(self):
suffix = self.get_rule_code_suffix()
return "%s-%s" % (self._get_prefix(), suffix)
def _get_prefix(self):
return self._prefix
def _get_suffix_prefix(self):
if self.variation == "nearest":
return self._suffix_prefix_nearest
else:
return self._suffix_prefix_last
def get_rule_code_suffix(self):
        return '%s-%s-%s' % (self._get_suffix_prefix(),
                             _int_to_month[self.startingMonth],
                             _int_to_weekday[self.weekday])
@classmethod
    def _parse_suffix(cls, variation_code, startingMonth_code, weekday_code):
        if variation_code == "N":
            variation = "nearest"
        elif variation_code == "L":
            variation = "last"
        else:
            raise ValueError(
                "Unable to parse variation_code: %s" % (variation_code,))
startingMonth = _month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
return {
"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation,
}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
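# --- Illustrative sketch (not part of the original module) ---
# A retail-style fiscal year ending on the last Saturday (weekday 5) of
# August:
# >>> fy = FY5253(startingMonth=8, weekday=5, variation="last")
# >>> fy.get_year_end(datetime(2007, 1, 1))   # -> 2007-08-25, the last
# ...                                         #    Saturday of August 2007
# >>> fy.rule_code                            # -> 'RE-L-AUG-SAT'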
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
    such as retail, manufacturing, and parking.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
    Y is a certain month of the year.
    Unlike the fixed-month quarter offsets above, the quarter-end dates
    here fall on the fiscal year's anchor weekday, so the exact dates
    depend on the weekday and variation parameters.
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    qtr_with_extra_week : The quarter number that contains the leap
        (14th) week when one is needed. {1, 2, 3, 4}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
        self._offset = FY5253(
            startingMonth=kwds['startingMonth'],
            weekday=kwds["weekday"],
            variation=kwds["variation"])
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
@apply_wraps
def apply(self, other):
base = other
n = self.n
if n > 0:
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
start = other - self._offset
else:
start = other
qtr_lens = self.get_weeks(other + self._offset)
for weeks in qtr_lens:
start += relativedelta(weeks=weeks)
if start > other:
other = start
n -= 1
break
else:
n = -n
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
end = other + self._offset
else:
end = other
qtr_lens = self.get_weeks(other)
for weeks in reversed(qtr_lens):
end -= relativedelta(weeks=weeks)
if end < other:
other = end
n -= 1
break
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
if self._offset.onOffset(dt):
prev_year_end = dt - self._offset
next_year_end = dt
else:
next_year_end = dt + self._offset
prev_year_end = dt - self._offset
week_in_year = (next_year_end - prev_year_end).days / 7
return week_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
        # dt is not a year end itself, so step back to the previous year end
        # and walk forward one quarter at a time.
        prev_year_end = dt - self._offset
        qtr_lens = self.get_weeks(dt)
        current = prev_year_end
for qtr_len in qtr_lens[0:4]:
current += relativedelta(weeks=qtr_len)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
return "%s-%s" % (self._prefix,
"%s-%d" % (suffix, self.qtr_with_extra_week))
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
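# --- Illustrative sketch (not part of the original module) ---
# Quarters are 13 weeks each, with one 14-week quarter in 53-week years:
# >>> q = FY5253Quarter(startingMonth=8, weekday=5, variation="last",
# ...                   qtr_with_extra_week=1)
# >>> q.get_weeks(datetime(2013, 4, 6))   # -> [14, 13, 13, 13]; the fiscal
# ...                                     #    year ending 2013-08-31 has 53 weeks
# >>> q.rule_code                         # -> 'REQ-L-AUG-SAT-1'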
class Easter(DateOffset):
'''
DateOffset for the Easter holiday using
logic defined in dateutil. Right now uses
the revised method which is valid in years
1583-4099.
'''
_adjust_dst = True
def __init__(self, n=1, **kwds):
super(Easter, self).__init__(n, **kwds)
@apply_wraps
def apply(self, other):
        # NOTE: easter() returns a datetime.date, so convert it to the type
        # (and timezone) of `other` before comparing
        currentEaster = easter(other.year)
        currentEaster = datetime(currentEaster.year, currentEaster.month,
                                 currentEaster.day)
        currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)
if self.n >= 0:
if other >= currentEaster:
new = easter(other.year + self.n)
else:
new = easter(other.year + self.n - 1)
else:
if other > currentEaster:
new = easter(other.year + self.n + 1)
else:
new = easter(other.year + self.n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
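# --- Illustrative sketch (not part of the original module) ---
# >>> datetime(2015, 1, 1) + Easter()           # -> 2015-04-05, Easter Sunday
# >>> Easter().onOffset(datetime(2015, 4, 5))   # -> True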
#----------------------------------------------------------------------
# Ticks
import operator
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return DateOffset.__eq__(self, other)
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params())
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return DateOffset.__ne__(self, other)
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return _delta_to_nanoseconds(self.delta)
def apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
if isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
else:
raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
_prefix = 'undefined'
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
            if seconds % 3600 == 0:
                return Hour(seconds // 3600)
            elif seconds % 60 == 0:
                return Minute(seconds // 60)
            else:
                return Second(seconds)
else:
nanos = _delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
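# --- Illustrative sketch (not part of the original module) ---
# _delta_to_tick picks the coarsest Tick that represents the delta exactly;
# timedelta is imported at the top of this module.
# >>> _delta_to_tick(timedelta(days=2))             # -> Day(2)
# >>> _delta_to_tick(timedelta(minutes=90))         # -> Minute(90)
# >>> _delta_to_tick(timedelta(microseconds=1500))  # -> Micro(1500)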
_delta_to_nanoseconds = tslib._delta_to_nanoseconds
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def _get_firstbday(wkday):
"""
    wkday is the weekday of the first day of the month, i.e. the first
    element of monthrange(year, month).
    If the month starts on a Saturday or Sunday, shift the first business
    day forward to reflect this.
"""
first = 1
if wkday == 5: # on Saturday
first = 3
elif wkday == 6: # on Sunday
first = 2
return first
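# --- Illustrative sketch (not part of the original module) ---
# >>> _get_firstbday(5)   # month starts on a Saturday -> first bday is the 3rd
# 3
# >>> _get_firstbday(2)   # month starts on a Wednesday -> the 1st is a bday
# 1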
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
    periods : int, optional
    offset : DateOffset, default BDay()
        Time increment between successive dates
    time_rule : (legacy) name of DateOffset object to be used, optional
        Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset %s did not increment date' % offset)
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset %s did not decrement date' % offset)
cur = next_date
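# --- Illustrative sketch (not part of the original module) ---
# generate_range is a generator, so materialize it with list():
# >>> list(generate_range(start=datetime(2011, 1, 3), periods=3))
# -> [2011-01-03, 2011-01-04, 2011-01-05]   (Mon-Wed, with the BDay() default)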
prefix_mapping = dict((offset._prefix, offset) for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
MonthEnd, # 'M'
MonthBegin, # 'MS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
])
prefix_mapping['N'] = Nano
def _make_offset(key):
"""Gets offset based on key. KeyError if prefix is bad, ValueError if
suffix is bad. All handled by `get_offset` in tseries/frequencies. Not
public."""
if key is None:
return None
split = key.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too many '-')
obj = klass._from_name(*split[1:])
obj._named = key
return obj
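# --- Illustrative sketch (not part of the original module) ---
# _make_offset splits the key on '-' and dispatches on the prefix:
# >>> _make_offset('Q-FEB')          # -> QuarterEnd(startingMonth=2)
# >>> _make_offset('RE-L-AUG-SAT')   # -> FY5253 ending on the last Saturday
# ...                                #    of August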
| gpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/formats/test_style.py | 5 | 36923 | import copy
import textwrap
import re
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
jinja2 = pytest.importorskip('jinja2')
from pandas.io.formats.style import Styler, _get_level_lengths # noqa
class TestStyler(object):
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({'A': np.random.permutation(range(6))})
self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo='bar'):
return pd.Series(['color: %s' % foo], index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
self.dataframes = [
self.df,
pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
'c': pd.Categorical(['a', 'b'])})
]
def test_init_non_pandas(self):
with pytest.raises(TypeError):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): ['color: red'],
(1, 0): ['color: blue']}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi(self):
attrs = DataFrame({"A": ['color: red; foo: bar',
'color: blue; foo: baz']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi_trailing_semi(self):
attrs = DataFrame({"A": ['color: red; foo: bar;',
'color: blue; foo: baz;']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
def test_copy(self):
s2 = copy.copy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is s2.ctx # shallow
assert self.styler._todo is s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx == s2.ctx
assert self.styler._todo == s2._todo
def test_deepcopy(self):
s2 = copy.deepcopy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is not s2.ctx
assert self.styler._todo is not s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx != s2.ctx
assert s2._todo == []
assert self.styler._todo != s2._todo
def test_clear(self):
s = self.df.style.highlight_max()._compute()
assert len(s.ctx) > 0
assert len(s._todo) > 0
s.clear()
assert len(s.ctx) == 0
assert len(s._todo) == 0
def test_render(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.render()
# An index but no columns
DataFrame(columns=['a']).style.render()
# A column but no index
DataFrame(index=['a']).style.render()
# No IndexError raised?
def test_render_double(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red; border: 1px",
"color: blue; border: 2px"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_set_properties(self):
df = pd.DataFrame({"A": [0, 1]})
result = df.style.set_properties(color='white',
size='10px')._compute().ctx
# order is deterministic
v = ["color: white", "size: 10px"]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = pd.DataFrame({'A': [0, 1]})
result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
color='white')._compute().ctx
expected = {(0, 0): ['color: white']}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'is_visible': True, 'display_value': ''},
{'class': 'col_heading level0 col0',
'display_value': 'A',
'type': 'th',
'value': 'A',
'is_visible': True,
},
{'class': 'col_heading level0 col1',
'display_value': 'B',
'type': 'th',
'value': 'B',
'is_visible': True,
},
{'class': 'col_heading level0 col2',
'display_value': 'C',
'type': 'th',
'value': 'C',
'is_visible': True,
}]]
assert result['head'] == expected
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index('A').style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'B', 'display_value': 'B', 'is_visible': True},
{'class': 'col_heading level0 col1', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'blank', 'type': 'th', 'value': ''},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index(['A', 'B']).style._translate()
expected = [[
{'class': 'blank', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'index_name level1', 'type': 'th',
'value': 'B'},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = pd.DataFrame({0: [1, 2, 3]})
df.style._translate()
def test_apply_axis(self):
df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
f = lambda x: ['val: %s' % x.max() for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
(1, 0): ['val: 1'], (1, 1): ['val: 1']}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
(1, 0): ['val: 0'], (1, 1): ['val: 1']}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
def test_apply_subset(self):
axes = [0, 1]
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for ax in axes:
for slice_ in slices:
result = self.df.style.apply(self.h, axis=ax, subset=slice_,
foo='baz')._compute().ctx
expected = dict(((r, c), ['color: baz'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
def test_applymap_subset(self):
def f(x):
return 'foo: bar'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.applymap(f, subset=slice_)._compute().ctx
expected = dict(((r, c), ['foo: bar'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
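    # --- Illustrative sketch (not part of the original test module) ---
    # ctx maps (row, col) positions to lists of CSS declarations, e.g.:
    # >>> pd.DataFrame({'A': [1, 2]}).style.applymap(
    # ...     lambda v: 'color: red')._compute().ctx
    # {(0, 0): ['color: red'], (1, 0): ['color: red']}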
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
s.ctx = {(0, 0): ['color: red'],
(1, 0): ['']}
result = s._translate()['cellstyle']
expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
{'props': [['', '']], 'selector': 'row1_col0'}]
assert result == expected
def test_bar_align_left(self):
df = pd.DataFrame({'A': [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 50.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 100.0%, transparent 0%)']
}
assert result == expected
result = df.style.bar(color='red', width=50)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 25.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 50.0%, transparent 0%)']
}
assert result == expected
df['C'] = ['a'] * len(df)
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
df['C'] = df['C'].astype('category')
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
def test_bar_align_left_0points(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.style.bar()._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%'],
(0, 2): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)']}
assert result == expected
result = df.style.bar(axis=1)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(0, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(1, 0): ['width: 10em', ' height: 80%'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)']}
assert result == expected
def test_bar_align_mid_pos_and_neg(self):
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #d65f5f 0.0%, '
'#d65f5f 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, '
'#d65f5f 10.0%, #d65f5f 10.0%, '
'transparent 10.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, #5fba7d 10.0%'
', #5fba7d 30.0%, transparent 30.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, '
'#5fba7d 10.0%, #5fba7d 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_pos(self):
df = pd.DataFrame({'A': [10, 20, 50, 100]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 20.0%, transparent 20.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 50.0%, transparent 50.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 100.0%, transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_neg(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, '
'#d65f5f 0.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 40.0%, '
'#d65f5f 40.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 70.0%, '
'#d65f5f 70.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 80.0%, '
'#d65f5f 80.0%, #d65f5f 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_zero_pos_and_neg(self):
# See https://github.com/pandas-dev/pandas/pull/14757
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='zero', color=[
'#d65f5f', '#5fba7d'], width=90)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 45.0%, '
'#d65f5f 45.0%, #d65f5f 50%, '
'transparent 50%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, '
'#5fba7d 50%, #5fba7d 50.0%, '
'transparent 50.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, #5fba7d 50%, '
'#5fba7d 60.0%, transparent 60.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, #5fba7d 50%, '
'#5fba7d 95.0%, transparent 95.0%)']}
assert result == expected
def test_bar_bad_align_raises(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
with pytest.raises(ValueError):
df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
def test_highlight_null(self, null_color='red'):
df = pd.DataFrame({'A': [0, np.nan]})
result = df.style.highlight_null()._compute().ctx
expected = {(0, 0): [''],
(1, 0): ['background-color: red']}
assert result == expected
def test_nonunique_raises(self):
df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
with pytest.raises(ValueError):
df.style
with pytest.raises(ValueError):
Styler(df)
def test_caption(self):
styler = Styler(self.df, caption='foo')
result = styler.render()
assert all(['caption' in result, 'foo' in result])
styler = self.df.style
result = styler.set_caption('baz')
assert styler is result
assert styler.caption == 'baz'
def test_uuid(self):
styler = Styler(self.df, uuid='abc123')
result = styler.render()
assert 'abc123' in result
styler = self.df.style
result = styler.set_uuid('aaa')
assert result is styler
assert result.uuid == 'aaa'
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = pd.DataFrame({'a': [1, 3, 5, 6], 'b': [2, 4, 12, 21]})
result = df.style.render(uuid='test')
assert 'test' in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
styler = Styler(self.df, table_styles=style)
result = ' '.join(styler.render().split())
assert 'th { foo: bar; }' in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.render()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).render()
assert 'class="foo" data-bar' in result
def test_precision(self):
with pd.option_context('display.precision', 10):
s = Styler(self.df)
assert s.precision == 10
s = Styler(self.df, precision=2)
assert s.precision == 2
s2 = s.set_precision(4)
assert s is s2
assert s.precision == 4
def test_apply_none(self):
def f(x):
return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
index=x.index, columns=x.columns)
result = (pd.DataFrame([[1, 2], [3, 4]])
.style.apply(f, axis=None)._compute().ctx)
assert result[(1, 1)] == ['color: red']
def test_trim(self):
result = self.df.style.render() # trim=True
assert result.count('#') == 0
result = self.df.style.highlight_max().render()
assert result.count('#') == len(self.df.columns)
def test_highlight_max(self):
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
# max(df) = min(-df)
for max_ in [True, False]:
if max_:
attr = 'highlight_max'
else:
df = -df
attr = 'highlight_min'
result = getattr(df.style, attr)()._compute().ctx
assert result[(1, 1)] == ['background-color: yellow']
result = getattr(df.style, attr)(color='green')._compute().ctx
assert result[(1, 1)] == ['background-color: green']
result = getattr(df.style, attr)(subset='A')._compute().ctx
assert result[(1, 0)] == ['background-color: yellow']
result = getattr(df.style, attr)(axis=0)._compute().ctx
expected = {(1, 0): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 1): [''], (0, 0): ['']}
assert result == expected
result = getattr(df.style, attr)(axis=1)._compute().ctx
expected = {(0, 1): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 0): [''], (1, 0): ['']}
assert result == expected
        # separate since we can't negate the strings
        df['C'] = ['a', 'b']
        result = df.style.highlight_max()._compute().ctx
        expected = {(1, 1): ['background-color: yellow']}
        assert result == expected
        result = df.style.highlight_min()._compute().ctx
        expected = {(0, 0): ['background-color: yellow']}
        assert result == expected
def test_export(self):
f = lambda x: 'color: red' if x > 0 else 'color: blue'
        g = lambda x, y, z: 'color: %s' % y if x > 0 else 'color: %s' % z
style1 = self.styler
style1.applymap(f)\
.applymap(g, y='a', z='b')\
.highlight_max()
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.render()
def test_display_format(self):
df = pd.DataFrame(np.random.random(size=(2, 2)))
ctx = df.style.format("{:0.1f}")._translate()
assert all(['display_value' in c for c in row] for row in ctx['body'])
assert (all([len(c['display_value']) <= 3 for c in row[1:]]
for row in ctx['body']))
assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
def test_display_format_raises(self):
df = pd.DataFrame(np.random.randn(2, 2))
with pytest.raises(TypeError):
df.style.format(5)
with pytest.raises(TypeError):
df.style.format(True)
def test_display_subset(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"},
subset=pd.IndexSlice[0, :])._translate()
expected = '0.1'
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1234'
assert ctx['body'][0][2]['display_value'] == '12.34%'
raw_11 = '1.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice['a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][0][2]['display_value'] == '0.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, 'a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[[0, 1], ['a']])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1'
assert ctx['body'][0][2]['display_value'] == '0.1234'
assert ctx['body'][1][2]['display_value'] == '1.1234'
def test_display_dict(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][2]['display_value'] == '12.34%'
df['c'] = ['aaa', 'bbb']
ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][3]['display_value'] == 'AAA'
def test_bad_apply_shape(self):
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(lambda x: 'x', subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: [''], subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', '', ''])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], subset=1)
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], axis=1)
def test_apply_bad_return(self):
def f(x):
return ''
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(TypeError):
df.style._apply(f, axis=None)
def test_apply_bad_labels(self):
def f(x):
return pd.DataFrame(index=[1, 2], columns=['a', 'b'])
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = pd.MultiIndex.from_product([['a', 'b'], [0, 1, 2]])
expected = {(0, 0): 3, (0, 3): 3, (1, 0): 1, (1, 1): 1, (1, 2): 1,
(1, 3): 1, (1, 4): 1, (1, 5): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = pd.MultiIndex.from_arrays([
[1, 1, 2, 1],
['a', 'b', 'b', 'd']
])
expected = {(0, 0): 2, (0, 2): 1, (0, 3): 1,
(1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
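    # --- Illustrative sketch (not part of the original test module) ---
    # _get_level_lengths maps (level, start_position) -> span length, which
    # the template turns into rowspan attributes; for the product index
    # above, level 0 has two spans of 3 and level 1 has six spans of 1.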
def test_mi_sparse(self):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body_0 = result['body'][0][0]
expected_0 = {
"value": "a", "display_value": "a", "is_visible": True,
"type": "th", "attributes": ["rowspan=2"],
"class": "row_heading level0 row0", "id": "level0_row0"
}
tm.assert_dict_equal(body_0, expected_0)
body_1 = result['body'][0][1]
expected_1 = {
"value": 0, "display_value": 0, "is_visible": True,
"type": "th", "class": "row_heading level1 row0",
"id": "level1_row0"
}
tm.assert_dict_equal(body_1, expected_1)
body_10 = result['body'][1][0]
expected_10 = {
"value": 'a', "display_value": 'a', "is_visible": False,
"type": "th", "class": "row_heading level0 row1",
"id": "level0_row1"
}
tm.assert_dict_equal(body_10, expected_10)
head = result['head'][0]
expected = [
{'type': 'th', 'class': 'blank', 'value': '',
'is_visible': True, "display_value": ''},
{'type': 'th', 'class': 'blank level0', 'value': '',
'is_visible': True, 'display_value': ''},
{'type': 'th', 'class': 'col_heading level0 col0', 'value': 'A',
'is_visible': True, 'display_value': 'A'}]
assert head == expected
def test_mi_sparse_disabled(self):
with pd.option_context('display.multi_sparse', False):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body = result['body']
for row in body:
assert 'attributes' not in row[0]
def test_mi_sparse_index_names(self):
df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
[['a', 'a'], [0, 1]],
names=['idx_level_0', 'idx_level_1'])
)
result = df.style._translate()
head = result['head'][1]
expected = [{
'class': 'index_name level0', 'value': 'idx_level_0',
'type': 'th'},
{'class': 'index_name level1', 'value': 'idx_level_1',
'type': 'th'},
{'class': 'blank', 'value': '', 'type': 'th'}]
assert head == expected
def test_mi_sparse_column_names(self):
df = pd.DataFrame(
np.arange(16).reshape(4, 4),
index=pd.MultiIndex.from_arrays(
[['a', 'a', 'b', 'a'], [0, 1, 1, 2]],
names=['idx_level_0', 'idx_level_1']),
columns=pd.MultiIndex.from_arrays(
[['C1', 'C1', 'C2', 'C2'], [1, 0, 1, 0]],
names=['col_0', 'col_1']
)
)
result = df.style._translate()
head = result['head'][1]
expected = [
{'class': 'blank', 'value': '', 'display_value': '',
'type': 'th', 'is_visible': True},
{'class': 'index_name level1', 'value': 'col_1',
'display_value': 'col_1', 'is_visible': True, 'type': 'th'},
{'class': 'col_heading level1 col0',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col1',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
{'class': 'col_heading level1 col2',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col3',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
]
assert head == expected
class TestStylerMatplotlibDep(object):
def test_background_gradient(self):
tm._skip_if_no_mpl()
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
for c_map in [None, 'YlOrRd']:
result = df.style.background_gradient(cmap=c_map)._compute().ctx
assert all("#" in x[0] for x in result.values())
assert result[(0, 0)] == result[(0, 1)]
assert result[(1, 0)] == result[(1, 1)]
result = df.style.background_gradient(
subset=pd.IndexSlice[1, 'A'])._compute().ctx
assert result[(1, 0)] == ['background-color: #fff7fb']
def test_block_names():
# catch accidental removal of a block
expected = {
'before_style', 'style', 'table_styles', 'before_cellstyle',
'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
'before_rows', 'tr', 'after_rows',
}
result = set(Styler.template.blocks)
assert result == expected
def test_from_custom_template(tmpdir):
p = tmpdir.mkdir("templates").join("myhtml.tpl")
p.write(textwrap.dedent("""\
{% extends "html.tpl" %}
{% block table %}
<h1>{{ table_title|default("My Table") }}</h1>
{{ super() }}
{% endblock table %}"""))
result = Styler.from_custom_template(str(tmpdir.join('templates')),
'myhtml.tpl')
assert issubclass(result, Styler)
assert result.env is not Styler.env
assert result.template is not Styler.template
styler = result(pd.DataFrame({"A": [1, 2]}))
assert styler.render()
def test_shim():
# https://github.com/pandas-dev/pandas/pull/16059
# Remove in 0.21
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
from pandas.formats.style import Styler as _styler # noqa
| mit |
wanggang3333/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = '\u0627'  # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625'  # alef with a hamza below
    expected = ''  # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
# Probe whether this numpy build emits a RuntimeWarning for float division
# by zero; if it does not, the warning assertion below cannot be checked.
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer's DF thresholding:
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# Test that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
codeaudit/gpss-research | source/sandpit.py | 4 | 36385 | '''
Created on Nov 2012
@authors: James Robert Lloyd (jrl44@cam.ac.uk)
David Duvenaud (dkd23@cam.ac.uk)
Roger Grosse (rgrosse@mit.edu)
'''
import flexiblekernel as fk
import grammar
import gpml
import utils.latex
import numpy as np
import pylab
import scipy.io
import sys
import os
from job_controller import *
from flexiblekernel import ScoredKernel
import utils.fear
from config import *
from utils import gaussians, psd_matrices
nax = np.newaxis
import tempfile
import subprocess
import time
import cblparallel
from cblparallel.util import mkstemp_safe
import re
import shutil
import random
def kernel_test():
k = fk.MaskKernel(4, 3, fk.SqExpKernel(0, 0))
print k.gpml_kernel_expression()
print k.pretty_print()
print '[%s]' % k.param_vector()
print 'kernel_test complete'
def base_kernel_test():
print [k.pretty_print() for k in fk.base_kernels(1)]
print 'base_kernel_test complete'
def expand_test():
k1 = fk.SqExpKernel(1, 1)
k2 = fk.SqExpPeriodicKernel(2, 2, 2)
e = fk.SumKernel([k1, k2])
g = grammar.OneDGrammar()
print ''
for f in grammar.expand(e, g):
#print f
print f.pretty_print()
print grammar.canonical(f).pretty_print()
print
print ' ***** duplicates removed *****'
print
kernels = grammar.expand(e, g)
for f in grammar.remove_duplicates(kernels):
print f.pretty_print()
print
print '%d originally, %d without duplicates' % (len(kernels), len(grammar.remove_duplicates(kernels)))
print 'expand_test complete'
def expand_test2():
k1 = fk.MaskKernel(2, 0, fk.SqExpKernel(1, 1))
k2 = fk.MaskKernel(2, 1, fk.SqExpPeriodicKernel(2, 2, 2))
e = fk.SumKernel([k1, k2])
g = grammar.MultiDGrammar(2)
print ''
for f in grammar.expand(e, g):
print f.pretty_print()
print grammar.canonical(f).pretty_print()
print
print ' ***** duplicates removed *****'
print
kernels = grammar.expand(e, g)
for f in grammar.remove_duplicates(kernels):
print f.pretty_print()
print
print '%d originally, %d without duplicates' % (len(kernels), len(grammar.remove_duplicates(kernels)))
print 'expand_test complete'
def load_mauna():
'''2011 Mauna dataset.'''
data_file = '../data/mauna.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def load_mauna_original():
"""
Original Mauna dataset made to match the experiments from Carl's book.
For details, see data/preprocess_mauna_2004.m
"""
data_file = '../data/mauna2003.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def call_gpml_test():
np.random.seed(0)
k = fk.SumKernel([fk.SqExpKernel(0, 0), fk.SqExpKernel(0, 0)])
print k.gpml_kernel_expression()
print k.pretty_print()
print '[%s]' % k.param_vector()
X, y = load_mauna()
N_orig = X.shape[0]
X = X[:N_orig//3, :]
y = y[:N_orig//3, :]
results = []
pylab.figure()
for i in range(15):
init_params = np.random.normal(size=k.param_vector().size)
#kernel_hypers, nll, nlls = gpml.optimize_params(k.gpml_kernel_expression(), k.param_vector(), X, y, return_all=True)
kernel_hypers, nll, nlls = gpml.optimize_params(k.gpml_kernel_expression(), init_params, X, y, return_all=True)
print "kernel_hypers =", kernel_hypers
print "nll =", nll
k_opt = k.family().from_param_vector(kernel_hypers)
print k_opt.gpml_kernel_expression()
print k_opt.pretty_print()
print '[%s]' % k_opt.param_vector()
pylab.semilogx(range(1, nlls.size+1), nlls)
results.append((kernel_hypers, nll))
pylab.draw()
print
print
results = sorted(results, key=lambda p: p[1])
for kernel_hypers, nll in results:
print nll, kernel_hypers
print "done"
def sample_mauna_best():
# This kernel was chosen from a search run on the Mauna data.
kernel = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
X = np.linspace(0,50,500)
# Todo: set random seed.
sample = gpml.sample_from_gp_prior(kernel, X)
pylab.figure()
pylab.plot(X, sample)
pylab.title('( SqExp(ell=-0.7, sf=-1.3) + SqExp(ell=4.8, sf=2.3) ) \n x ( SqExp(ell=3.0, sf=0.5) + Periodic(ell=0.4, p=-0.0, sf=-0.9) )')
def sample_Carls_kernel():
kernel = fk.Carls_Mauna_kernel()
X = np.linspace(0,50,500)
# Todo: set random seed.
sample = gpml.sample_from_gp_prior(kernel, X)
pylab.figure()
pylab.plot(X, sample)
pylab.title("Carl's kernel")
def compare_kernels_experiment():
kernel1 = fk.Carls_Mauna_kernel()
kernel2 = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
#kernel2 = ( SqExp(ell=-0.8, sf=-1.4) + Periodic(ell=0.5, p=-0.3, sf=-1.1) + RQ(ell=1.9, sf=1.6, a=0.2) + ( SqExp(ell=4.5, sf=1.0) x Periodic(ell=0.6, p=-0.0, sf=0.1) ) )
X, y = load_mauna_original()
N_orig = X.shape[0] # subsample data.
X = X[:N_orig//5, :]
y = y[:N_orig//5, :]
print "Carl's kernel"
print kernel1.pretty_print()
kernel_hypers1, nll1 = gpml.optimize_params(kernel1.gpml_kernel_expression(), kernel1.param_vector(), \
X, y, noise=np.log(0.19), iters=100 )
k1_opt = kernel1.family().from_param_vector(kernel_hypers1)
print k1_opt.pretty_print()
print "Carl's NLL =", nll1
print "Our kernel"
print kernel2.pretty_print()
kernel_hypers2, nll2 = gpml.optimize_params(kernel2.gpml_kernel_expression(), kernel2.param_vector(), \
X, y, noise=np.log(0.19), iters=100)
k2_opt = kernel2.family().from_param_vector(kernel_hypers2)
print k2_opt.pretty_print()
print "Our NLL =", nll2
def simple_mauna_experiment():
'''A first version of an experiment learning kernels'''
seed_kernels = [fk.SqExpKernel(0, 0)]
X, y = load_mauna_original()
N_orig = X.shape[0] # subsample data.
X = X[:N_orig//3, :]
y = y[:N_orig//3, :]
max_depth = 4
k = 4 # Expand k best
nll_key = 1
laplace_key = 2
results = []
for dummy in range(max_depth):
new_results = structure_search.try_expanded_kernels(X, y, D=2, seed_kernels=seed_kernels, verbose=False)
results = results + new_results
print
results = sorted(results, key=lambda p: p[nll_key], reverse=True)
for kernel, nll, laplace in results:
print nll, laplace, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[nll_key])[0:k]]
def plot_Carls_kernel():
kernel = fk.Carls_Mauna_kernel()
X = np.linspace(0,10,1000)
sigma = gpml.plot_kernel(kernel, X)
pylab.figure()
pylab.plot(X, sigma)
pylab.title("Carl's kernel")
def plot_our_kernel():
kernel = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
X = np.linspace(0,10,1000)
sigma = gpml.plot_kernel(kernel, X)
pylab.figure()
pylab.plot(X, sigma)
pylab.title('Our kernel')
def load_simple_gef_load():
'''Zone 1 and temperature station 2'''
data_file = '../data/gef_load_simple.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def load_full_gef_load():
'''20 Zones in y, time and 11 temp stations in X'''
data_file = '../data/gef_load_full_Xy.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def simple_gef_load_experiment(verbose=True):
'''A first version of an experiment learning kernels'''
seed_kernels = [fk.MaskKernel(2, 0, fk.SqExpKernel(0, 0)),
fk.MaskKernel(2, 1, fk.SqExpKernel(0, 0))]
X, y = load_simple_gef_load()
# subsample data.
X = X[0:99, :]
y = y[0:99, :]
max_depth = 5
k = 2 # Expand k best
nll_key = 1
BIC_key = 2
active_key = BIC_key
results = []
for dummy in range(max_depth):
new_results = structure_search.try_expanded_kernels(X, y, D=2, seed_kernels=seed_kernels, verbose=verbose)
results = results + new_results
print
results = sorted(results, key=lambda p: p[active_key], reverse=True)
for kernel, nll, BIC in results:
print nll, BIC, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
def full_gef_load_experiment(zone=1, max_depth=5, verbose=True):
'''Round 2'''
# seed_kernels = [fk.MaskKernel(2, 0, fk.SqExpKernel(0, 0)),
# fk.MaskKernel(2, 1, fk.SqExpKernel(0, 0))]
seed_kernels = [fk.MaskKernel(12, i, fk.SqExpKernel(0., 0.)) for i in range(12)] + \
[fk.MaskKernel(12, i, fk.SqExpPeriodicKernel(0., 0., 0.)) for i in range(12)] + \
[fk.MaskKernel(12, i, fk.RQKernel(0., 0., 0.)) for i in range(12)]
X, y = load_full_gef_load()
# subsample data.
X = X[0:299, :]
y = y[0:299, zone-1]
# max_depth = 5
k = 2 # Expand k best
nll_key = 1
BIC_key = 2
active_key = BIC_key
results = []
for i in range(max_depth):
if i:
expand = True
else:
expand = False
new_results = structure_search.try_expanded_kernels(X, y, D=12, seed_kernels=seed_kernels, expand=expand, verbose=verbose)
results = results + new_results
print
results = sorted(results, key=lambda p: p[active_key], reverse=True)
for kernel, nll, BIC in results:
print nll, BIC, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
#os.system(command_str)
#### Attempt at sending individual jobs to the cluster
import pysftp, config
def mkstemp_safe(directory, suffix):
(os_file_handle, file_name) = tempfile.mkstemp(dir=directory, suffix=suffix)
# Close the OS-level handle straight away so the file can be reopened by
# name (e.g. by MATLAB or the qsub shell scripts) without leaking handles.
os.close(os_file_handle)
return file_name
def fear_connect():
return pysftp.Connection('fear', username=config.USERNAME, password=config.PASSWORD)
def fear_command(cmd, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
output = srv.execute(cmd)
if fear is None:
srv.close()
return output
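# Illustrative sketch (not part of the original pipeline): every helper in
# this file accepts an optional fear= argument so that one SFTP connection
# can be reused across several calls. The command string is hypothetical.
def _example_fear_session():
    srv = fear_connect()
    try:
        output = fear_command('ls temp', fear=srv)
    finally:
        srv.close()
    return output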
def copy_to_fear(local_path, remote_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
srv.put(local_path, remote_path)
if fear is None:
srv.close()
def copy_from_fear(remote_path, local_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
srv.get(remote_path, local_path)
if fear is None:
srv.close()
def fear_rm(remote_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
output = srv.execute('rm %s' % remote_path)
if fear is None:
srv.close()
return output
def fear_file_exists(remote_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
response = srv.execute('if [ -e %s ] \nthen \necho exists \nfi' % remote_path)
if fear is None:
srv.close()
return response == ['exists\n']
def fear_qdel_all(fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
output = srv.execute('. /usr/local/grid/divf2/common/settings.sh; qdel -u %s' % config.USERNAME)
if fear is None:
srv.close()
return output
def qsub_matlab_code(code, verbose=True, local_dir ='../temp/', remote_dir ='./temp/', fear=None):
# Write to a temp script
script_file = mkstemp_safe(local_dir, '.m')
shell_file = mkstemp_safe(local_dir, '.sh')
f = open(script_file, 'w')
f.write(code)
f.close()
#### Local file reference without extension - MATLAB fails silently otherwise
f = open(shell_file, 'w')
f.write('/usr/local/apps/matlab/matlabR2011b/bin/matlab -nosplash -nojvm -nodisplay -singleCompThread -r ' + script_file.split('/')[-1].split('.')[0] + '\n')
f.close()
# Copy this to fear
copy_to_fear(script_file, remote_dir + script_file.split('/')[-1], fear)
copy_to_fear(shell_file, remote_dir + shell_file.split('/')[-1], fear)
# Create fear call
#### WARNING - hardcoded path 'temp'
fear_string = ' '.join(['. /usr/local/grid/divf2/common/settings.sh;',
'cd temp;',
'chmod +x %s;' % shell_file.split('/')[-1],
'qsub -l lr=0',
shell_file.split('/')[-1] + ';',
'cd ..'])
if verbose:
print 'Submitting : %s' % fear_string
# Send this command to fear
fear_command(fear_string, fear)
# Tell the caller where the script file was written
return script_file, shell_file
def re_qsub(shell_file, verbose=True, fear=None):
# Create fear call
#### WARNING - hardcoded path 'temp'
fear_string = ' '.join(['. /usr/local/grid/divf2/common/settings.sh;',
'cd temp;',
'chmod +x %s;' % shell_file.split('/')[-1],
'qsub -l lr=0',
shell_file.split('/')[-1] + ';',
'cd ..'])
if verbose:
print 'Re-submitting : %s' % fear_string
# Send this command to fear
fear_command(fear_string, fear)
# Matlab code to optimise hyper-parameters on one file, given one kernel.
OPTIMIZE_KERNEL_CODE = r"""
%% Load the data, it should contain X and y.
a = 'trying to load data files'
load '%(datafile)s'
a = 'loaded data files'
%% Load GPML
addpath(genpath('%(gpml_path)s'));
a = 'loaded GPML'
%% Set up model.
meanfunc = {@meanConst}
hyp.mean = mean(y)
covfunc = %(kernel_family)s
hyp.cov = %(kernel_params)s
likfunc = @likGauss
hyp.lik = %(noise)s
[hyp_opt, nlls] = minimize(hyp, @gp, -%(iters)s, @infExact, meanfunc, covfunc, likfunc, X, y);
best_nll = nlls(end)
laplace_nle = best_nll %% HACK HACK HACK
save( '%(writefile)s', 'hyp_opt', 'best_nll', 'nlls', 'laplace_nle' );
a = 'Goodbye, World!'
exit();
"""
def fear_run_experiments(kernels, X, y, return_all=False, verbose=True, noise=None, iters=300, local_dir ='../temp/', remote_dir ='./temp/', \
sleep_time=10, n_sleep_timeout=6, re_submit_wait=60):
'''
Sends jobs to fear, waits for them, returns the results
'''
# Ensure X and y are 2-D column vectors, as the GPML interface expects
if X.ndim == 1:
X = X[:, nax]
if y.ndim == 1:
y = y[:, nax]
if noise is None:
noise = np.log(np.var(y)/10) #### Just a heuristic.
data = {'X': X, 'y': y}
# Setup the connection to fear
fear = fear_connect()
# Submit all the jobs and remember where we put them
data_files = []
write_files = []
script_files = []
shell_files = []
for kernel in kernels:
# Create data file and results file
data_files.append(mkstemp_safe(local_dir, '.mat'))
write_files.append(mkstemp_safe(local_dir, '.mat'))
# Save data
scipy.io.savemat(data_files[-1], data)
# Copy files to fear
copy_to_fear(data_files[-1], remote_dir + data_files[-1].split('/')[-1], fear)
# copy_to_fear(write_files[-1], remote_dir + write_files[-1].split('/')[-1])
# Create MATLAB code
code = OPTIMIZE_KERNEL_CODE % {'datafile': data_files[-1].split('/')[-1],
'writefile': write_files[-1].split('/')[-1],
'gpml_path': config.FEAR_GPML_PATH,
'kernel_family': kernel.gpml_kernel_expression(),
'kernel_params': '[ %s ]' % ' '.join(str(p) for p in kernel.param_vector()),
'noise': str(noise),
'iters': str(iters)}
# Submit this to fear and save the file names
script_file, shell_file = qsub_matlab_code(code=code, verbose=verbose, local_dir=local_dir, remote_dir=remote_dir, fear=fear)
script_files.append(script_file)
shell_files.append(shell_file)
# Let the scripts run
# if verbose:
# print 'Giving the jobs some time to run'
# time.sleep(re_submit_wait)
# Wait for and read in results
fear_finished = False
job_finished = [False] * len(write_files)
results = [None] * len(write_files)
sleep_count = 0
while not fear_finished:
for (i, write_file) in enumerate(write_files):
if not job_finished[i]:
if fear_file_exists(remote_dir + write_file.split('/')[-1], fear):
# Another job has finished
job_finished[i] = True
sleep_count = 0
# Copy files
os.remove(write_file) # Not sure if necessary
copy_from_fear(remote_dir + write_file.split('/')[-1], write_file, fear)
# Read results ##### THIS WILL CHANGE IF RUNNING DIFFERENT TYPE OF EXPERIMENT
gpml_result = scipy.io.loadmat(write_file)
optimized_hypers = gpml_result['hyp_opt']
nll = gpml_result['best_nll'][0, 0]
# nlls = gpml_result['nlls'].ravel()
laplace_nle = gpml_result['laplace_nle'][0, 0]
kernel_hypers = optimized_hypers['cov'][0, 0].ravel()
k_opt = kernels[i].family().from_param_vector(kernel_hypers)
BIC = 2 * nll + len(kernel_hypers) * np.log(y.shape[0])
results[i] = (k_opt, nll, laplace_nle, BIC)
# Tidy up
fear_rm(remote_dir + data_files[i].split('/')[-1], fear)
fear_rm(remote_dir + write_files[i].split('/')[-1], fear)
fear_rm(remote_dir + script_files[i].split('/')[-1], fear)
fear_rm(remote_dir + shell_files[i].split('/')[-1], fear)
fear_rm(remote_dir + shell_files[i].split('/')[-1] + '*', fear)
os.remove(data_files[i])
os.remove(write_files[i])
os.remove(script_files[i])
os.remove(shell_files[i])
# Tell the world
if verbose:
print '%d / %d jobs complete' % (sum(job_finished), len(job_finished))
if sum(job_finished) == len(job_finished):
fear_finished = True
if not fear_finished:
if verbose:
print 'Sleeping'
sleep_count += 1
if sleep_count < n_sleep_timeout:
time.sleep(sleep_time)
else:
# Jobs taking too long - assume failure - resubmit
fear_qdel_all(fear)
for (i, shell_file) in enumerate(shell_files):
if not job_finished[i]:
re_qsub(shell_file, verbose=verbose, fear=fear)
if verbose:
print 'Giving the jobs some time to run'
time.sleep(re_submit_wait)
sleep_count = 0
fear.close()
return results
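# Illustrative sketch (hypothetical toy data; requires cluster access via
# fear_connect, so it is defined here but not executed):
def _example_fear_run():
    X = np.linspace(0, 1, 50)[:, nax]
    y = np.sin(10 * X)
    kernels = [fk.SqExpKernel(0., 0.), fk.RQKernel(0., 0., 0.)]
    return fear_run_experiments(kernels, X, y, verbose=False)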
def fear_load_mat(data_file, y_dim=1):
'''Load a Matlab file'''
data = scipy.io.loadmat(data_file)
return data['X'], data['y'][:,y_dim-1], np.shape(data['X'])[1]
def fear_expand_kernels(D, seed_kernels, verbose=False):
'''
Expand each seed kernel using the multi-dimensional grammar and return
the de-duplicated list of candidate kernels.
'''
g = grammar.MultiDGrammar(D)
print 'Seed kernels :'
for k in seed_kernels:
print k.pretty_print()
kernels = []
for k in seed_kernels:
kernels = kernels + grammar.expand(k, g)
kernels = grammar.remove_duplicates(kernels)
print 'Expanded kernels :'
for k in kernels:
print k.pretty_print()
return (kernels)
def fear_experiment(data_file, results_filename, y_dim=1, subset=None, max_depth=2, k=2, verbose=True, sleep_time=60, n_sleep_timeout=20, re_submit_wait=60, \
description=''):
'''Recursively search for the best kernel'''
X, y, D = fear_load_mat(data_file, y_dim)
# Subset if necessary
if not subset is None:
X = X[subset, :]
y = y[subset]
##### This should be abstracted
seed_kernels = [fk.MaskKernel(D, i, fk.SqExpKernel(0., 0.)) for i in range(D)] + \
[fk.MaskKernel(D, i, fk.SqExpPeriodicKernel(0., 0., 0.)) for i in range(D)] + \
[fk.MaskKernel(D, i, fk.RQKernel(0., 0., 0.)) for i in range(D)]
nll_key = 1
laplace_key = 2
BIC_key = 3
active_key = BIC_key
results = []
results_sequence = []
for r in range(max_depth):
if r == 0:
new_results = fear_run_experiments(seed_kernels, X, y, verbose=verbose, \
sleep_time=sleep_time, n_sleep_timeout=n_sleep_timeout, re_submit_wait=re_submit_wait)
else:
new_results = fear_run_experiments(fear_expand_kernels(D, seed_kernels, verbose=verbose), X, y, verbose=verbose, \
sleep_time=sleep_time, n_sleep_timeout=n_sleep_timeout, re_submit_wait=re_submit_wait)
results = results + new_results
print
results = sorted(results, key=lambda p: p[active_key], reverse=True)
for kernel, nll, laplace, BIC in results:
print nll, laplace, BIC, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
results_sequence.append(results)
# Write results to a file
results = sorted(results, key=lambda p: p[active_key], reverse=True)
with open(results_filename, 'w') as outfile:
outfile.write('Experiment results for\n datafile = %s\n y_dim = %d\n subset = %s\n max_depth = %d\n k = %d\n Description = %s\n\n' % (data_file, y_dim, subset, max_depth, k, description))
for (i, results) in enumerate(results_sequence):
outfile.write('\n%%%%%%%%%% Level %d %%%%%%%%%%\n\n' % i)
for kernel, nll, laplace, BIC in results:
outfile.write( 'nll=%f, laplace=%f, BIC=%f, kernel=%s\n' % (nll, laplace, BIC, kernel.__repr__()))
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
def plot_gef_load_Z01_raw():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
par1 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
host.set_xlabel("Time")
host.set_ylabel("Load (Z01)")
par1.set_ylabel("Temperature (T09)")
p1, = host.plot(X[0:499,0], y[0:499])
p2, = par1.plot(X[0:499,0], X[0:499,9])
# par1.set_ylim(0, 4)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.draw()
plt.show()
def plot_gef_load_Z01_split_mean():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499])
kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499])
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
par1 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
host.set_xlabel("Time")
host.set_ylabel("Periodic component")
plt.title('Posterior mean functions')
par1.set_ylabel("Smooth component")
p1, = host.plot(X[0:499,0], posterior_mean_1)
p2, = par1.plot(X[0:499,0], posterior_mean_2)
# par1.set_ylim(0, 4)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.draw()
plt.show()
def plot_gef_load_Z01_split_mean_temp():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499], iters=10)
kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499], iters=10)
plt.figure()
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
par1 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
host.set_xlabel("Temperature (T09)")
# par1.set_ylabel("Periodic component")
plt.title('Posterior mean function')
host.set_ylabel("Load posterior mean")
p2, = host.plot(X[0:499,9], y[0:499], 'o', alpha=0.5)
p1, = host.plot(X[0:499,9], posterior_mean_2, 'o')
# par1.set_ylim(0, 4)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
# par1.axis["right"].label.set_color(p2.get_color())
plt.draw()
plt.show()
def plot_gef_load_Z01_smooth_2d_mean():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
min_T = -3.0
max_T = 1.0
N_T = 10
temps = np.repeat(np.linspace(min_T, max_T, N_T), 499)
input = np.tile(X[0:499,:], (N_T, 1))
input[:,9] = temps
posterior_mean = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499], input, iters=300)
X_plt = X[0:499,0]
Y_plt = np.linspace(min_T, max_T, N_T)
Z_plt = np.reshape(posterior_mean, (N_T, 499), 'A')
data = {'X': X_plt, 'Y': Y_plt, 'Z': Z_plt, 'post_mean': posterior_mean}
scipy.io.savemat('temp_data.mat', data)
def plot_gef_load_Z01():
# This kernel was chosen from a search run on the gef_load data.
# kernel = eval(ProductKernel([ covMask(ndim=12, active_dimension=0, base_kernel=RQKernel(lengthscale=0.268353, output_variance=-0.104149, alpha=-2.105742)), covMask(ndim=12, active_dimension=9, base_kernel=SqExpKernel(lengthscale=1.160242, output_variance=0.004344)), SumKernel([ covMask(ndim=12, active_dimension=0, base_kernel=SqExpPeriodicKernel(lengthscale=-0.823413, period=0.000198, output_variance=-0.917064)), covMask(ndim=12, active_dimension=0, base_kernel=RQKernel(lengthscale=-0.459219, output_variance=-0.077250, alpha=-2.212718)) ]) ]))
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
# Todo: set random seed.
sample = gpml.sample_from_gp_prior(kernel, X[0:499,:])
pylab.figure()
pylab.plot(X[0:499,0], y[0:499])
pylab.title('GEFCom2012 Z01 and T09 - first 500 data points')
pylab.xlabel('Time')
pylab.ylabel('Load')
# pylab.figure()
# pylab.plot(X[0:499,0], sample)
# pylab.title('GEF load Z01 - a sample from the learnt kernel')
# pylab.xlabel('Time')
# pylab.ylabel('Load')
#
# kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
# fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
#
# posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499])
#
# pylab.figure()
# pylab.plot(X[0:499,0], posterior_mean_1)
# pylab.title('GEF load Z01 - periodic posterior mean component')
# pylab.xlabel('Time')
# pylab.ylabel('Load')
#
# kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
# fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
#
# posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499])
#
# pylab.figure()
# pylab.plot(X[0:499,0], posterior_mean_2)
# pylab.title('GEF load Z01 - smooth posterior mean component')
# pylab.xlabel('Time')
# pylab.ylabel('Load')
def main():
# Run everything
# fear_experiment('../data/abalone_500.mat', '../results/abalone_500_01.txt', max_depth=4, k=3)
# fear_experiment('../data/gef_load_full_Xy.mat', '../results/gef_load_500_Z01_02.txt', max_depth=6, k=5, subset=range(500), y_dim=1, description = 'BIC, 0 init')
# fear_experiment('../data/gef_load_full_Xy.mat', '../results/gef_load_500_Z09_02.txt', max_depth=6, k=5, subset=range(500), y_dim=9, description = 'BIC, 0 init')
fear_experiment('../data/bach_synth_r_200.mat', '../results/bach_synth_r_200_test.txt', max_depth=2, k=1, description = 'Dave test')
# fear_experiment('../data/housing.mat', '../results/housing_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/mauna2003.mat', '../results/mauna2003_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/mauna2011.mat', '../results/mauna2011_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/prostate.mat', '../results/prostate_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/pumadyn256.mat', '../results/pumadyn256_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/r_concrete_100.mat', '../results/r_concrete_100_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/r_concrete_500.mat', '../results/r_concrete_500_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/r_solar_500.mat', '../results/r_solar_500_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/unicycle_pitch_angle_400.mat', '../results/unicycle_pitch_angle_400_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/unicycle_pitch_ang_vel_400.mat', '../results/unicycle_pitch_ang_vel_400_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
def debug_laplace():
# Load data set
X, y, D, Xtest, ytest = gpml.load_mat('../data/kfold_data/r_concrete_500_fold_10_of_10.mat', y_dim=1)
# Load the suspicious kernel
sk = fk.repr_string_to_kernel('ScoredKernel(k_opt=ProductKernel([ MaskKernel(ndim=8, active_dimension=0, base_kernel=CubicKernel(offset=1.757755, output_variance=7.084045)), MaskKernel(ndim=8, active_dimension=7, base_kernel=SqExpPeriodicKernel(lengthscale=-2.701080, period=-0.380918, output_variance=-0.071214)) ]), nll=6348.096611, laplace_nle=-184450132.068237, bic_nle=12720.630212, noise=[-1.77276072])')
# Create some code to evaluate it
if X.ndim == 1: X = X[:, nax]
if y.ndim == 1: y = y[:, nax]
ndata = y.shape[0]
# Create data file
data_file = cblparallel.create_temp_file('.mat')
scipy.io.savemat(data_file, {'X': X, 'y': y}) # Save regression data
# Move to fear
cblparallel.copy_to_remote(data_file)
scripts = [gpml.OPTIMIZE_KERNEL_CODE % {'datafile': data_file.split('/')[-1],
'writefile': '%(output_file)s', # N.B. cblparallel manages output files
'gpml_path': cblparallel.gpml_path(local_computation=False),
'kernel_family': sk.k_opt.gpml_kernel_expression(),
'kernel_params': '[ %s ]' % ' '.join(str(p) for p in sk.k_opt.param_vector()),
'noise': str(sk.noise),
'iters': str(300)}]
#### Need to be careful with % signs
#### For the moment, cblparallel expects no single % signs - FIXME
scripts[0] = re.sub('% ', '%% ', scripts[0])
# Test
scripts[0] = re.sub('delta = 1e-6', 'delta = 1e-6', scripts[0])
#scripts[0] = re.sub('hyp.lik = [-1.77276072]', 'hyp.lik = [-0.77276072]', scripts[0])
output_file = cblparallel.run_batch_on_fear(scripts, language='matlab', max_jobs=600)[0]
# Read in results
output = gpml.read_outputs(output_file)
result = ScoredKernel.from_matlab_output(output, sk.k_opt.family(), ndata)
print result
print output.hessian
os.remove(output_file)
# Remove temporary data file (perhaps on the cluster server)
cblparallel.remove_temp_file(data_file, local_computation=False)
def debug_descriptions():
ck = fk.Carls_Mauna_kernel()
print ck.english()
| mit |
gregcaporaso/scikit-bio | skbio/stats/power.py | 3 | 47995 | r"""
Empirical Power Estimation (:mod:`skbio.stats.power`)
=====================================================
.. currentmodule:: skbio.stats.power
The purpose of this module is to provide empirical, post-hoc power estimation
of normally and non-normally distributed data. It also provides support to
subsample data to facilitate this analysis.
The underlying principle is based on subsampling and Monte Carlo simulation.
Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
\neq ... \neq x_{n}`.
Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
we know we should reject the null hypothesis. If we fail to reject the null
hypothesis, we have committed a Type II error and our result is a false
negative. We can estimate the frequency of Type II errors at various sampling
depths by repeatedly subsampling the populations and observing how often we
see a false negative. If we repeat this several times for each subsampling
depth, and vary the depths we use, we can start to approximate a relationship
between the number of samples we use and the rate of false negatives, also
called the statistical power of the test.
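For illustration, a minimal sketch of this principle (hypothetical
populations whose means genuinely differ, tested with a two-sample t-test;
all numbers here are made up):
>>> import numpy as np
>>> import scipy.stats
>>> np.random.seed(0)
>>> pop1 = np.random.normal(0.0, 1.0, 1000)
>>> pop2 = np.random.normal(0.5, 1.0, 1000)
>>> rejections = [scipy.stats.ttest_ind(np.random.choice(pop1, 10),
...                                     np.random.choice(pop2, 10))[1] < 0.05
...               for _ in range(100)]
>>> empirical_power = np.mean(rejections)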
To generate complete power curves from data which appears underpowered, the
`statsmodels.stats.power` package can be used to solve for an effect size. The
effect size can be used to extrapolate a power curve for the data.
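For instance (the effect size of 0.5 below is purely illustrative):
>>> from statsmodels.stats.power import TTestIndPower
>>> n_needed = TTestIndPower().solve_power(effect_size=0.5, alpha=0.05,
...                                        power=0.8)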
Most functions in this module accept a statistical test function which takes a
list of samples and returns a p value. The test is then evaluated over a series
of subsamples.
Sampling may be handled in two ways. For any set of samples, we may simply
choose to draw :math:`n` observations at random for each sample. Alternatively,
if metadata is available, samples can be matched based on a set of control
categories so that paired samples are drawn at random from the set of available
matches.
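A minimal sketch of a matched draw, where a single set of indices subsamples
both arrays so that pairs stay aligned (the arrays are hypothetical):
>>> import numpy as np
>>> x = np.arange(10)
>>> y = 2 * x + 1
>>> idx = np.random.choice(len(x), 5, replace=False)
>>> x_sub, y_sub = x[idx], y[idx]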
Functions
---------
.. autosummary::
:toctree:
subsample_power
subsample_paired_power
confidence_bound
paired_subsamples
Examples
--------
Suppose we wanted to test that there's a relationship between two random
variables, `ind` and `dep`. Let's use random subsampling to estimate the
statistical power of our test with an alpha of 0.1, 0.01, and 0.001.
To control for the pseudo-random number generation, we will use a seed.
When using these functions with your own data, you don't need to include this
step.
>>> import numpy as np
>>> np.random.seed(20)
>>> ind = np.random.randint(0, 20, 15)
>>> ind
array([ 3, 15, 9, 11, 7, 2, 0, 8, 19, 16, 6, 6, 16, 9, 5])
>>> dep = (3 * ind + 5 + np.random.randn(15) * 5).round(3)
>>> dep
array([ 15.617, 47.533, 28.04 , 33.788, 19.602, 12.229, 4.779,
36.838, 67.256, 55.032, 22.157, 7.051, 58.601, 38.664,
18.783])
Let's define a test that will draw a list of sample pairs and determine
if they're correlated. We'll use `scipy.stats.pearsonr` which takes two arrays
and returns a correlation coefficient and a p-value representing the
probability the two distributions are correlated.
>>> from scipy.stats import pearsonr
>>> f = lambda x: pearsonr(x[0], x[1])[1]
Now, let's use random sampling to estimate the power of our test on
the first distribution.
>>> samples = [ind, dep]
>>> print("%.3e" % f(samples))
3.646e-08
In `subsample_power`, we can maintain a paired relationship between samples
by setting `draw_mode` to "matched". We can also set our critical value, so
that we estimate power for a critical value of :math:`\alpha = 0.05`, an
estimate for the critical value of 0.01, and a critical value of 0.001.
>>> from skbio.stats.power import subsample_power
>>> pwr_100, counts_100 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.1,
... num_iter=25)
>>> pwr_010, counts_010 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.01,
... num_iter=25)
>>> pwr_001, counts_001 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.001,
... num_iter=25)
>>> counts_100
array([3, 4, 5, 6, 7, 8, 9])
>>> pwr_100.mean(0)
array([ 0.484, 0.844, 0.932, 0.984, 1. , 1. , 1. ])
>>> pwr_010.mean(0)
array([ 0.044, 0.224, 0.572, 0.836, 0.928, 0.996, 1. ])
>>> pwr_001.mean(0)
array([ 0. , 0.016, 0.108, 0.332, 0.572, 0.848, 0.956])
Based on these power estimates, the more stringent we make our critical
value (guarding against type I errors and false positives), the more samples
we need to be confident that we have not committed a type II error.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import numpy as np
import scipy.stats
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Subsamples data to iteratively calculate power
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
alpha_pwr : float, optional
The critical value used to calculate the power.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point
on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
The power calculated for each subsample at each count. The array has
`num_runs` rows, one column for each element of `sample_counts`, and a
depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If `draw_mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Let's say we wanted to look at the relationship between the presence of a
specific bacteria, *Gardnerella vaginalis* in the vaginal community, and
the probability of a pre- or post-menopausal woman experiencing a urinary
tract infection (UTI). Healthy women were enrolled in the study either
before or after menopause, and followed for eight weeks. Participants
submitted fecal samples at the beginning of the study, and were then
followed for clinical symptoms of a UTI. A confirmed UTI was an endpoint
in the study.
Using available literature and 16S sequencing, a set of candidate taxa were
identified as correlated with UTIs, including *G. vaginalis*. In the 100
women (50 premenopausal and 50 postmenopausal samples) who had UTIs, the
presence or absence of *G. vaginalis* was confirmed with quantitative PCR.
We can model the probability that detectable *G. vaginalis* was found in
these samples using a binomial model. (*Note that this is a simulation.*)
>>> import numpy as np
>>> np.random.seed(25)
>>> pre_rate = np.random.binomial(1, 0.85, size=(50,))
>>> pre_rate.sum()
45
>>> pos_rate = np.random.binomial(1, 0.40, size=(50,))
>>> pos_rate.sum()
21
Let's set up a test function, so we can test the probability of
finding a difference in frequency between the two groups. We'll use
`scipy.stats.chisquare` to look for the difference in frequency between
groups.
>>> from scipy.stats import chisquare
>>> test = lambda x: chisquare(np.array([x[i].sum() for i in
... range(len(x))]))[1]
Let's make sure that our two distributions are different.
>>> print(round(test([pre_rate, pos_rate]), 3))
0.003
Since the two groups have the same number of samples, and we don't have
enough information to try controlling the data, we'll use
`skbio.stats.power.subsample_power` to compare the two groups. If we had
metadata about other risk factors, like reproductive history, BMI, or
tobacco use, we might want to use
`skbio.stats.power.subsample_paired_power`.
We'll also use "ind" `draw_mode`, since there is no linkage between the
two groups of samples.
>>> from skbio.stats.power import subsample_power
>>> pwr_est, counts = subsample_power(test=test,
... samples=[pre_rate, pos_rate],
... num_iter=100,
... num_runs=5,
... counts_interval=5)
>>> counts
array([ 5, 10, 15, 20, 25, 30, 35, 40, 45])
>>> np.nanmean(pwr_est, axis=0) # doctest: +NORMALIZE_WHITESPACE
array([ 0.056, 0.074, 0.226, 0.46 , 0.61 , 0.806, 0.952, 1. ,
1. ])
>>> counts[np.nanmean(pwr_est, axis=0) > 0.8].min()
30
So, we can estimate that we will see a significant difference in the
presence of *G. vaginalis* in the stool of pre- and post-menopausal women
with UTIs if we have at least 30 samples per group.
If we wanted to test the relationship of a second candidate taxa which is
more rare in the population, but may have a similar effect, based on
available literature, we might also start by trying to identify 30
samples per group where the second candidate taxa is present.
Suppose, now, that we want to test whether a secondary metabolite seen only
in the presence of *G. vaginalis* is also correlated with UTIs. We
can model the abundance of the metabolite as a normal distribution.
>>> met_pos = (np.random.randn(pre_rate.sum() + pos_rate.sum()) * 2000 +
... 2500)
>>> met_pos[met_pos < 0] = 0
>>> met_neg = (np.random.randn(100 - (pre_rate.sum() +
... pos_rate.sum())) * 2000 + 500)
>>> met_neg[met_neg < 0] = 0
Let's compare the populations with a Kruskal-Wallis test. Physically, there
cannot be a negative concentration of a chemical, so we've set the lower
bound at 0. This means that we can no longer assume our distribution is
normal.
>>> from scipy.stats import kruskal
>>> def metabolite_test(x):
... return kruskal(x[0], x[1])[1]
>>> print(round(metabolite_test([met_pos, met_neg]), 3))
0.005
When we go to perform the statistical test on all the data, we might
notice that there are twice as many samples from women with *G. vaginalis*
than those without. It might make sense to account for this difference when
we're testing power. So, we're going to set the `ratio` parameter, which
lets us draw twice as many samples from women with *G. vaginalis*.
>>> pwr_est2, counts2 = subsample_power(test=metabolite_test,
... samples=[met_pos, met_neg],
... counts_interval=5,
... num_iter=100,
... num_runs=5,
... ratio=[2, 1])
>>> counts2
array([ 5., 10., 15., 20., 25., 30.])
>>> np.nanmean(pwr_est2, axis=0)
array([ 0.14 , 0.272, 0.426, 0.646, 0.824, 0.996])
>>> counts2[np.nanmean(pwr_est2, axis=0) > 0.8].min()
25.0
When we consider the number of samples per group needed in the power
analysis, we need to look at the ratio. The analysis says that we need 25
samples in the smallest group, in this case, the group of women without
*G. vaginalis* and 50 samples from women with *G. vaginalis* to see a
significant difference in the abundance of our secondary metabolite at 80%
power.
"""
# Checks the inputs
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=samples,
draw_mode=draw_mode,
ratio=ratio,
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
# Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates the power instances
for id2, c in enumerate(sample_counts):
count = np.round(c * ratio, 0).astype(int)
for id1 in range(num_runs):
ps = _compare_distributions(test=test,
samples=samples,
num_p=num_p,
counts=count,
num_iter=num_iter,
mode=draw_mode)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
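# Drops any singleton dimensions, e.g. the p-value axis when `test`
# returns a single float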
power = power.squeeze()
return power, sample_counts
@experimental(as_of="0.4.0")
def subsample_paired_power(test, meta, cat, control_cats, order=None,
strict_match=True, alpha_pwr=0.05,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Estimates power iteratively using samples with matching metadata
Parameters
----------
test : function
The statistical test which accepts a list of arrays sample ids and
returns a p value.
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str
The metadata category being varied between samples.
control_cats : list
The metadata categories to be used as controls. For example, if
you wanted to vary age (`cat` = "AGE"), you might want to control
for gender and health status (i.e. `control_cats` = ["SEX",
"HEALTHY"]).
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match : bool, optional
This determines how data is grouped using
`control_cats`. If a sample within `meta` has an undefined value (NaN)
for any of the columns in `control_cats`, the sample will not be
considered as having a match and will be ignored when `strict_match`
is True. If `strict_match` is False, missing values (NaN) in the
`control_cats` can be considered matches.
alpha_pwr : float, optional
The critical value used to calculate the power.
max_counts : positive int, optional
The maximum number of observations per sample to draw
for effect size calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
The power calculated for each subsample at each count. The array has
`num_runs` rows, one column for each element of `sample_counts`, and a
depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Assume you are interested in the role of a specific cytokine in protein
translocation in myeloid-lineage cells. You are able to culture two
macrophage lineages (bone marrow derived phagocytes and
peritoneally-derived macrophages). Due to unfortunate circumstances, your
growth media must be acquired from multiple sources (lab, company A,
company B). Also unfortunately, you must use labor-intensive, low-throughput
assays. You have some preliminary measurements, and you'd like to
predict how many (more) cells you need to analyze for 80% power.
You have information about 60 cells, which we'll simulate below. Note
that we are setting a random seed value for consistency.
>>> import numpy as np
>>> import pandas as pd
>>> np.random.seed(25)
>>> data = pd.DataFrame.from_dict({
... 'CELL_LINE': np.random.binomial(1, 0.5, size=(60,)),
... 'SOURCE': np.random.binomial(2, 0.33, size=(60,)),
... 'TREATMENT': np.hstack((np.zeros((30)), np.ones((30)))),
... 'INCUBATOR': np.random.binomial(1, 0.2, size=(60,))})
>>> data['OUTCOME'] = (0.25 + data.TREATMENT * 0.25) + \
... np.random.randn(60) * (0.1 + data.SOURCE/10 + data.CELL_LINE/5)
>>> data.loc[data.OUTCOME < 0, 'OUTCOME'] = 0
>>> data.loc[data.OUTCOME > 1, 'OUTCOME'] = 1
We will approach this by assuming that our outcome is not normally
distributed, and apply a Kruskal-Wallis test to compare the cytokine-treated
and untreated cells.
>>> from scipy.stats import kruskal
>>> f = lambda x: kruskal(*[data.loc[i, 'OUTCOME'] for i in x])[1]
Let's check that cytokine treatment has a significant effect across all
the cells.
>>> treatment_stat = [g for g in data.groupby('TREATMENT').groups.values()]
>>> f(treatment_stat)
0.0019386336266250209
Now, let's pick the control categories. It seems reasonable to assume there
may be an effect of cell line on the treatment outcome, which may be
attributed to differences in receptor expression. It may also be possible
that there are differences due to cytokine source. Incubators were maintained
under the same conditions throughout the experiment, within one degree of
temperature difference at any given time, and the same level of CO2.
So, at least initially, let's ignore differences due to the incubator.
It's recommended that, as a first-pass analysis, control variables be
selected based on an idea of what may be biologically relevant to the
system, although further iteration might encourage the consideration of
variables with effect sizes similar to, or larger than, the variable of
interest.
>>> control_cats = ['SOURCE', 'CELL_LINE']
>>> from skbio.stats.power import subsample_paired_power
>>> pwr, cnt = subsample_paired_power(test=f,
... meta=data,
... cat='TREATMENT',
... control_cats=control_cats,
... counts_interval=5,
... num_iter=25,
... num_runs=5)
>>> cnt
array([ 5., 10., 15., 20.])
>>> pwr.mean(0)
array([ 0.24 , 0.528, 0.68 , 0.88 ])
>>> pwr.std(0).round(3)
array([ 0.088, 0.127, 0.168, 0.08 ])
Estimating off the power curve, it looks like 20 cells per group may
provide adequate power for this experiment, although the large variance
in power might suggest extending the curves or increasing the number of
samples per group.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks for the number of sampling pairs available
meta_pairs, index = _identify_sample_groups(meta, cat, control_cats, order,
strict_match)
min_obs = min([_get_min_size(meta, cat, control_cats, order, strict_match),
np.floor(len(index)*0.9)])
sub_ids = _draw_paired_samples(meta_pairs, index, min_obs)
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=sub_ids,
draw_mode='matched',
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
# Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates power instances
for id2, c in enumerate(sample_counts):
for id1 in range(num_runs):
ps = np.zeros((num_p, num_iter))
for id3 in range(num_iter):
subs = _draw_paired_samples(meta_pairs, index, c)
ps[:, id3] = test(subs)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
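# Drops any singleton dimensions, e.g. the p-value axis when `test`
# returns a single float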
power = power.squeeze()
return power, sample_counts
@experimental(as_of="0.4.0")
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
r"""Calculates a confidence bound assuming a normal distribution
Parameters
----------
vec : array_like
The array of values to use in the bound calculation.
alpha : float, optional
The critical value, used for the confidence bound calculation.
df : float, optional
The degrees of freedom associated with the
distribution. If None is given, df is assumed to be the number of
elements in the specified axis minus one.
axis : int, optional
The axis over which to take the deviation. When axis
is None, a single value will be calculated for the whole matrix.
Returns
-------
bound : float
The confidence bound around the mean. The confidence interval is
[mean - bound, mean + bound].
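A quick sketch of typical use (illustrative, not a doctest)::

    import numpy as np
    from skbio.stats.power import confidence_bound

    vec = np.random.randn(50)
    bound = confidence_bound(vec, alpha=0.05)
    interval = (vec.mean() - bound, vec.mean() + bound)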
"""
# Determines the number of non-nan counts
vec = np.asarray(vec)
vec_shape = vec.shape
if axis is None and len(vec_shape) == 1:
num_counts = vec_shape[0] - np.isnan(vec).sum()
elif axis is None:
num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
else:
num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
(vec_shape[0] * vec_shape[1])
# Gets the df if not supplied
if df is None:
df = num_counts - 1
# Calculates the bound
# In the conversion from scipy.stats.nanstd -> np.nanstd `ddof=1` had to be
# added to match the scipy default of `bias=False`.
bound = np.nanstd(vec, axis=axis, ddof=1) / np.sqrt(num_counts - 1) * \
scipy.stats.t.ppf(1 - alpha / 2, df)
return bound
@experimental(as_of="0.4.0")
def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
r"""Draws a list of samples varied by `cat` and matched for `control_cats`
This function is designed to provide controlled samples, based on a
metadata category. For example, one could control for age, sex, education
level, and diet type while measuring exercise frequency.
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match : bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
ids : array
a set of ids which satisfy the criteria. These are not grouped by
`cat`. An empty array indicates there are no sample ids which satisfy
the requirements.
Examples
--------
If we have a mapping file for a set of random individuals looking at
housing, sex, age and antibiotic use.
>>> import pandas as pd
>>> import numpy as np
>>> meta = {'SW': {'HOUSING': '2', 'SEX': 'M', 'AGE': np.nan, 'ABX': 'Y'},
... 'TS': {'HOUSING': '2', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'CB': {'HOUSING': '3', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'BB': {'HOUSING': '1', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'}}
>>> meta = pd.DataFrame.from_dict(meta, orient="index")
>>> meta #doctest: +SKIP
ABX HOUSING AGE SEX
BB Y 1 40s M
CB Y 3 40s M
SW Y 2 NaN M
TS Y 2 40s M
We may want to vary an individual's housing situation, while holding
constant their age, sex and antibiotic use so we can estimate the effect
size for housing, and later compare it to the effects of other variables.
>>> from skbio.stats.power import paired_subsamples
>>> ids = paired_subsamples(meta, 'HOUSING', ['SEX', 'AGE', 'ABX'])
>>> np.hstack(ids) #doctest: +ELLIPSIS
array(['BB', 'TS', 'CB']...)
So, for this set of data, we can match TS, CB, and BB based on their age,
sex, and antibiotic use. SW cannot be matched in either group because
`strict_match` was true, and there is missing AGE data for this sample.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks the groups in the category
min_obs = _get_min_size(meta, cat, control_cats, order, strict_match)
# Identifies all possible subsamples
meta_pairs, index = _identify_sample_groups(meta=meta,
cat=cat,
control_cats=control_cats,
order=order,
strict_match=strict_match)
# Draws paired ids
ids = _draw_paired_samples(meta_pairs=meta_pairs,
index=index,
num_samps=min_obs)
return ids
def _get_min_size(meta, cat, control_cats, order, strict_match):
"""Determines the smallest group represented"""
if strict_match:
all_cats = copy.deepcopy(control_cats)
all_cats.append(cat)
meta = meta[all_cats].dropna()
return meta.groupby(cat).count().loc[order, control_cats[0]].min()
def _check_nans(x, switch=False):
r"""Returns False if x is a nan and True is x is a string or number
"""
if isinstance(x, str):
return True
elif isinstance(x, (float, int)):
return not np.isnan(x)
elif switch and isinstance(x, (list, tuple)) and np.nan in x:
return False
elif switch and isinstance(x, (list, tuple)):
return True
else:
raise TypeError('input must be a string, float or a nan')
def _calculate_power(p_values, alpha=0.05):
r"""Calculates statistical power empirically
Parameters
----------
p_values : 1-D array
A 1-D numpy array with the test results.
alpha : float
The critical value for the power calculation.
Returns
-------
power : float
The empirical power, or the fraction of observed p values below the
critical value.
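For example, for ``p_values = np.array([0.01, 0.20, 0.03])`` and
``alpha = 0.05``, two of the three p values fall below the critical value,
so the empirical power is :math:`2/3 \approx 0.667`.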
"""
p_values = np.atleast_2d(p_values)
w = (p_values < alpha).sum(axis=1)/p_values.shape[1]
return w
def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
num_iter=100):
r"""Compares two distribution arrays iteratively
Parameters
----------
test : function
The statistical test which accepts an array_like of sample ids
(list of lists) and returns a p-value. This can be a one-dimensional
array, or a float.
samples : list of arrays
A list where each 1-d array represents a sample. If `mode` is
"matched", there must be an equal number of observations in each
sample.
num_p : positive int
The number of p-values returned by the test.
counts : positive int or 1-D array, optional
The number of samples to draw from each distribution.
If this is a 1-D array, the length must correspond to the number of
samples. The function will not draw more observations than are in a
sample. In "matched" `mode`, the same number of observations will be
drawn from each group.
mode : {"ind", "matched", "paired"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
num_iter : positive int, optional
Default 100. The number of p-values to generate for each point on the
curve.
Returns
-------
p_values : array
The p-values for the subsampled tests. If `test` returned a single
p value, p_values is a one-dimensional array. If `test` returned an
array, `p_values` has dimensions `num_iter` x `num_p`
Raises
------
ValueError
If mode is not "ind" or "matched".
ValueError
If the arrays in samples are not the same length in "matched" mode.
ValueError
If counts is a 1-D array and counts and samples are different lengths.
"""
# Preallocates the p-value matrix
p_values = np.zeros((num_p, num_iter))
# Determines the number of samples per group
num_groups = len(samples)
samp_lens = [len(sample) for sample in samples]
if isinstance(counts, int):
counts = np.array([counts] * num_groups)
for idx in range(num_iter):
if mode == "matched":
pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
replace=False)
subs = [sample[pos] for sample in samples]
else:
subs = [np.random.choice(np.array(pop), counts[i], replace=False)
for i, pop in enumerate(samples)]
p_values[:, idx] = test(subs)
if num_p == 1:
p_values = p_values.squeeze()
return p_values
def _check_subsample_power_inputs(test, samples, draw_mode='ind', ratio=None,
max_counts=50, counts_interval=10,
min_counts=None):
r"""Makes sure that everything is sane before power calculations
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
Returns
-------
ratio : 1-D array
The fraction of the sample counts which should be assigned to each
group.
num_p : positive integer
The number of p values returned by `test`.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If `draw_mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
"""
if draw_mode not in {'ind', 'matched'}:
raise ValueError('mode must be "matched" or "ind".')
# Determines the minimum number of ids in a category
id_counts = np.array([len(id_) for id_ in samples])
num_ids = id_counts.min()
# Determines the number of groups
num_groups = len(samples)
# Checks that "matched" mode is handled appropriately
if draw_mode == "matched":
for id_ in samples:
if not len(id_) == num_ids:
raise ValueError('Each vector in samples must be the same '
'length in "matched" draw_mode.')
# Checks the number of counts is appropriate
if min_counts is None:
min_counts = counts_interval
if (max_counts - min_counts) < counts_interval:
raise ValueError("No subsamples of the specified size can be drawn.")
# Checks the ratio argument is sane
if ratio is None or draw_mode == 'matched':
ratio = np.ones((num_groups))
else:
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
ratio_counts = np.array([id_counts[i] / ratio[i]
for i in range(num_groups)])
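# The most samples that can be drawn per group while respecting `ratio`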
largest = ratio_counts.min()
# Determines the number of p values returned by the test
p_return = test(samples)
if isinstance(p_return, float):
num_p = 1
elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
num_p = p_return.shape[0]
else:
raise TypeError('test must return a float or one-dimensional array.')
# Calculates the sample counts
sample_counts = np.arange(min_counts,
min(max_counts, largest),
counts_interval)
return ratio, num_p, sample_counts
def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
"""Aggregates samples matches for `control_cats` that vary by `cat`
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match : bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
index : list
A list of numpy arrays describing the positions of samples to be drawn.
The first array is an index array. The second gives an integer
corresponding to the `control_cat`-group, and the third lists the
position of the reference group sample in the list of samples.
"""
# Sets up variables to be filled
meta_pairs = {}
index = []
i1 = 0
# Groups the data by the control groups
ctrl_groups = meta.groupby(control_cats).groups
# Identifies the samples that satisfy the control pairs. Keys are iterated
# in sorted order so that results don't change with different dictionary
# ordering.
for g in sorted(ctrl_groups, key=lambda k: str(k)):
ids = ctrl_groups[g]
# If strict_match, skips over data that has NaNs
if not _check_nans(g, switch=True) and strict_match:
continue
# Draws the samples that are matched for control cats
m_ids = meta.loc[ids].groupby(cat).groups
# Checks if samples from the cat groups are represented in those samples
id_vecs = [sorted(m_ids[o]) for o in order if o in m_ids]
# If all groups are represented, the index and results are retained
if len(id_vecs) == len(order):
min_vec = np.array([len(v) for v in id_vecs])
loc_vec = np.arange(0, min_vec.min())
meta_pairs[i1] = id_vecs
index.append(np.zeros(loc_vec.shape) + i1)
i1 = i1 + 1
# If the groups are not represented, an empty array gets passed
else:
index.append(np.array([]))
# Converts index to a 1d array
index = np.hstack(index)
# If index is empty, sets up meta_pairs with a 'no' key.
if not meta_pairs:
meta_pairs['no'] = order
return meta_pairs, index
def _draw_paired_samples(meta_pairs, index, num_samps):
"""Draws a random set of ids from a matched list
Parameters
----------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
index : list
A list of numpy arrays describing the positions of samples to be drawn.
The first array is an index array. The second gives an integer
corresponding to the `control_cat`-group, and the third lists the
position of the reference group sample in the list of samples.
Returns
-------
ids : list
A list of randomly selected ids from each group.
"""
# Handles an empty paired vector
if 'no' in meta_pairs:
return [np.array([]) for o in meta_pairs['no']]
# Identifies the absolute positions of the control group being drawn
set_pos = np.random.choice(index, int(num_samps),
replace=False).astype(int)
subs = []
# Draws the other groups. Gets a collections.Counter object for simplicity
counter = collections.Counter(set_pos)
# counter.items() order isn't guaranteed in python 3.6, so the random
# choice wouldn't be reproducible across python versions, even with a
# fixed seed; we therefore iterate over the unique positions in sorted
# order.
set_list = sorted(set(set_pos))
for set_ in set_list:
num_ = counter[set_]
r2 = [np.random.choice(col, num_, replace=False) for col in
meta_pairs[set_]]
subs.append(r2)
ids = [np.hstack(ids) for ids in zip(*subs)]
return ids
def _calculate_power_curve(test, samples, sample_counts, ratio=None,
mode='ind', num_iter=1000, alpha=0.05):
r"""Generates an empirical power curve for the samples.
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values and
returns a p value.
samples : array_like
`samples` can be a list of lists or an array where each sublist or row
in the array corresponds to a sampled group.
sample_counts : 1-D array
A vector of the number of samples which should be sampled in each
curve.
mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample.
num_iter : int
The default is 1000. The number of p-values to generate for each point
on the curve.
alpha : float or 1-D array, optional
The critical value(s) used to calculate the power.
Returns
-------
pwr : array
The statistical power calculated at each sample count and, when `alpha`
is an array, at each critical value.
Raises
------
ValueError
If ratio is an array and ratio is not the same length as samples
"""
# Casts array-likes to arrays
sample_counts = np.asarray(sample_counts)
# Determines the number of groups
num_groups = len(samples)
num_samps = len(sample_counts)
if isinstance(alpha, float):
vec = True
pwr = np.zeros((num_samps))
alpha = np.array([alpha])
else:
vec = False
num_crit = alpha.shape[0]
pwr = np.zeros((num_crit, num_samps))
# Checks the ratio argument
if ratio is None:
ratio = np.ones((num_groups))
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
# Loops through the sample sizes
for id2, s in enumerate(sample_counts):
count = np.round(s * ratio, 0).astype(int)
for id1, a in enumerate(alpha):
ps = _compare_distributions(test=test,
samples=samples,
counts=count,
num_p=1,
num_iter=num_iter,
mode=mode)
if vec:
pwr[id2] = _calculate_power(ps, a)
else:
pwr[id1, id2] = _calculate_power(ps, a)
return pwr
| bsd-3-clause |
pydata/xarray | xarray/tests/test_dask.py | 1 | 59200 | import operator
import pickle
import sys
from contextlib import suppress
from distutils.version import LooseVersion
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.ufuncs as xu
from xarray import DataArray, Dataset, Variable
from xarray.core import duck_array_ops
from xarray.testing import assert_chunks_equal
from xarray.tests import mock
from ..core.duck_array_ops import lazy_array_equiv
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_frame_equal,
assert_identical,
raise_if_dask_computes,
requires_pint_0_15,
requires_scipy_or_netCDF4,
)
from .test_backends import create_tmp_file
dask = pytest.importorskip("dask")
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")
ON_WINDOWS = sys.platform == "win32"
def test_raise_if_dask_computes():
data = da.from_array(np.random.RandomState(0).randn(4, 6), chunks=(2, 2))
with pytest.raises(RuntimeError, match=r"Too many computes"):
with raise_if_dask_computes():
data.compute()
class DaskTestCase:
def assertLazyAnd(self, expected, actual, test):
with dask.config.set(scheduler="synchronous"):
test(actual, expected)
if isinstance(actual, Dataset):
for k, v in actual.variables.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, DataArray):
assert isinstance(actual.data, da.Array)
for k, v in actual.coords.items():
if k in actual.dims:
assert isinstance(v.data, np.ndarray)
else:
assert isinstance(v.data, da.Array)
elif isinstance(actual, Variable):
assert isinstance(actual.data, da.Array)
else:
assert False
class TestVariable(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
@pytest.fixture(autouse=True)
def setUp(self):
self.values = np.random.RandomState(0).randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_var = Variable(("x", "y"), self.values)
self.lazy_var = Variable(("x", "y"), self.data)
def test_basics(self):
v = self.lazy_var
assert self.data is v.data
assert self.data.chunks == v.chunks
assert_array_equal(self.values, v)
def test_copy(self):
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True))
def test_chunk(self):
for chunks, expected in [
({}, ((2, 2), (2, 2, 2))),
(3, ((3, 1), (3, 3))),
({"x": 3, "y": 3}, ((3, 1), (3, 3))),
({"x": 3}, ((3, 1), (2, 2, 2))),
({"x": (3, 1)}, ((3, 1), (2, 2, 2))),
]:
rechunked = self.lazy_var.chunk(chunks)
assert rechunked.chunks == expected
self.assertLazyAndIdentical(self.eager_var, rechunked)
def test_indexing(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0], v[0])
self.assertLazyAndIdentical(u[:1], v[:1])
self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
@pytest.mark.skipif(
LooseVersion(dask.__version__) < LooseVersion("2021.04.1"),
reason="Requires dask v2021.04.1 or later",
)
@pytest.mark.parametrize(
"expected_data, index",
[
(da.array([99, 2, 3, 4]), 0),
(da.array([99, 99, 99, 4]), slice(2, None, -1)),
(da.array([99, 99, 3, 99]), [0, -1, 1]),
(da.array([99, 99, 99, 4]), np.arange(3)),
(da.array([1, 99, 99, 99]), [False, True, True, True]),
(da.array([1, 99, 99, 99]), np.arange(4) > 0),
(da.array([99, 99, 99, 99]), Variable(("x"), da.array([1, 2, 3, 4])) > 0),
],
)
def test_setitem_dask_array(self, expected_data, index):
arr = Variable(("x"), da.array([1, 2, 3, 4]))
expected = Variable(("x"), expected_data)
arr[index] = 99
assert_identical(arr, expected)
@pytest.mark.skipif(
LooseVersion(dask.__version__) >= LooseVersion("2021.04.1"),
reason="Requires dask v2021.04.0 or earlier",
)
def test_setitem_dask_array_error(self):
with pytest.raises(TypeError, match=r"stored in a dask array"):
v = self.lazy_var
v[:1] = 0
def test_squeeze(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())
def test_equals(self):
v = self.lazy_var
assert v.equals(v)
assert isinstance(v.data, da.Array)
assert v.identical(v)
assert isinstance(v.data, da.Array)
def test_transpose(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.T, v.T)
def test_shift(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
assert v.data.chunks == v.shift(x=1).data.chunks
def test_roll(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
assert v.data.chunks == v.roll(x=1).data.chunks
def test_unary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(-u, -v)
self.assertLazyAndIdentical(abs(u), abs(v))
self.assertLazyAndIdentical(u.round(), v.round())
def test_binary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(2 * u, 2 * v)
self.assertLazyAndIdentical(u + u, v + v)
self.assertLazyAndIdentical(u[0] + u, v[0] + v)
def test_repr(self):
expected = dedent(
"""\
<xarray.Variable (x: 4, y: 6)>
{!r}""".format(
self.lazy_var.data
)
)
assert expected == repr(self.lazy_var)
def test_pickle(self):
# Test that pickling/unpickling does not convert the dask
# backend to numpy
a1 = Variable(["x"], build_dask_array("x"))
a1.compute()
assert not a1._in_memory
assert kernel_call_count == 1
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 1
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
def test_reduce(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(u.std(), v.std())
with raise_if_dask_computes():
actual = v.argmax(dim="x")
self.assertLazyAndAllClose(u.argmax(dim="x"), actual)
with raise_if_dask_computes():
actual = v.argmin(dim="x")
self.assertLazyAndAllClose(u.argmin(dim="x"), actual)
self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x"))
with pytest.raises(NotImplementedError, match=r"only works along an axis"):
v.median()
with pytest.raises(NotImplementedError, match=r"only works along an axis"):
v.median(v.dims)
with raise_if_dask_computes():
v.reduce(duck_array_ops.mean)
def test_missing_values(self):
values = np.array([0, 1, np.nan, 3])
data = da.from_array(values, chunks=(2,))
eager_var = Variable("x", values)
lazy_var = Variable("x", data)
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
self.assertLazyAndIdentical(Variable("x", range(4)), lazy_var.fillna(2))
self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
def test_concat(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], "x"))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], "x"))
self.assertLazyAndIdentical(
u[:3], Variable.concat([v[[0, 2]], v[[1]]], "x", positions=[[0, 2], [1]])
)
def test_missing_methods(self):
v = self.lazy_var
try:
v.argsort()
except NotImplementedError as err:
assert "dask" in str(err)
try:
v[0].item()
except NotImplementedError as err:
assert "dask" in str(err)
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_univariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_bivariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))
def test_compute(self):
u = self.eager_var
v = self.lazy_var
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
def test_persist(self):
u = self.eager_var
v = self.lazy_var + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
@requires_pint_0_15(reason="Need __dask_tokenize__")
def test_tokenize_duck_dask_array(self):
import pint
unit_registry = pint.UnitRegistry()
q = unit_registry.Quantity(self.data, "meter")
variable = xr.Variable(("x", "y"), q)
token = dask.base.tokenize(variable)
post_op = variable + 5 * unit_registry.meter
assert dask.base.tokenize(variable) != dask.base.tokenize(post_op)
# Immutability check
assert dask.base.tokenize(variable) == token
class TestDataArrayAndDataset(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_identical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_allclose)
def assertLazyAndEqual(self, expected, actual):
self.assertLazyAnd(expected, actual, assert_equal)
@pytest.fixture(autouse=True)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_array = DataArray(
self.values, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
self.lazy_array = DataArray(
self.data, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
def test_rechunk(self):
chunked = self.eager_array.chunk({"x": 2}).chunk({"y": 2})
assert chunked.chunks == ((2,) * 2, (2,) * 3)
self.assertLazyAndIdentical(self.lazy_array, chunked)
def test_new_chunk(self):
chunked = self.eager_array.chunk()
assert chunked.data.name.startswith("xarray-<this-array>")
def test_lazy_dataset(self):
lazy_ds = Dataset({"foo": (("x", "y"), self.data)})
assert isinstance(lazy_ds.foo.variable.data, da.Array)
def test_lazy_array(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(u, v)
self.assertLazyAndAllClose(-u, -v)
self.assertLazyAndAllClose(u.T, v.T)
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(1 + u, 1 + v)
actual = xr.concat([v[:2], v[2:]], "x")
self.assertLazyAndAllClose(u, actual)
def test_compute(self):
u = self.eager_array
v = self.lazy_array
assert dask.is_dask_collection(v)
(v2,) = dask.compute(v + 1)
assert not dask.is_dask_collection(v2)
assert ((u + 1).data == v2.data).all()
def test_persist(self):
u = self.eager_array
v = self.lazy_array + 1
(v2,) = dask.persist(v)
assert v is not v2
assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
assert v2.__dask_keys__() == v.__dask_keys__()
assert dask.is_dask_collection(v)
assert dask.is_dask_collection(v2)
self.assertLazyAndAllClose(u + 1, v)
self.assertLazyAndAllClose(u + 1, v2)
def test_concat_loads_variables(self):
# Test that concat() computes not-in-memory variables at most once
# and loads them in the output, while leaving the input unaltered.
d1 = build_dask_array("d1")
c1 = build_dask_array("c1")
d2 = build_dask_array("d2")
c2 = build_dask_array("c2")
d3 = build_dask_array("d3")
c3 = build_dask_array("c3")
# Note: c is a non-index coord.
# Index coords are loaded by IndexVariable.__init__.
ds1 = Dataset(data_vars={"d": ("x", d1)}, coords={"c": ("x", c1)})
ds2 = Dataset(data_vars={"d": ("x", d2)}, coords={"c": ("x", c2)})
ds3 = Dataset(data_vars={"d": ("x", d3)}, coords={"c": ("x", c3)})
assert kernel_call_count == 0
out = xr.concat(
[ds1, ds2, ds3], dim="n", data_vars="different", coords="different"
)
# each kernel is computed exactly once
assert kernel_call_count == 6
# variables are loaded in the output
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars="all", coords="all")
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=["d"], coords=["c"])
# no extra kernel calls
assert kernel_call_count == 6
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat([ds1, ds2, ds3], dim="n", data_vars=[], coords=[])
# variables are loaded once as we are validating that they're identical
assert kernel_call_count == 12
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
out = xr.concat(
[ds1, ds2, ds3],
dim="n",
data_vars="different",
coords="different",
compat="identical",
)
# compat=identical doesn't do any more kernel calls than compat=equals
assert kernel_call_count == 18
assert isinstance(out["d"].data, np.ndarray)
assert isinstance(out["c"].data, np.ndarray)
# When the test for different turns true halfway through,
# stop computing variables as it would not have any benefit
ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])})
out = xr.concat(
[ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different"
)
# the variables of ds1 and ds2 were computed, but those of ds3 didn't
assert kernel_call_count == 22
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
# the data of ds1 and ds2 was loaded into numpy and then
# concatenated to the data of ds3. Thus, only ds3 is computed now.
out.compute()
assert kernel_call_count == 24
# Finally, test that originals are unaltered
assert ds1["d"].data is d1
assert ds1["c"].data is c1
assert ds2["d"].data is d2
assert ds2["c"].data is c2
assert ds3["d"].data is d3
assert ds3["c"].data is c3
# now check that concat() is correctly using dask name equality to skip loads
out = xr.concat(
[ds1, ds1, ds1], dim="n", data_vars="different", coords="different"
)
assert kernel_call_count == 24
# variables are not loaded in the output
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat(
[ds1, ds1, ds1], dim="n", data_vars=[], coords=[], compat="identical"
)
assert kernel_call_count == 24
# variables are not loaded in the output
assert isinstance(out["d"].data, dask.array.Array)
assert isinstance(out["c"].data, dask.array.Array)
out = xr.concat(
[ds1, ds2.compute(), ds3],
dim="n",
data_vars="all",
coords="different",
compat="identical",
)
# c1,c3 must be computed for comparison since c2 is numpy;
# d2 is computed too
assert kernel_call_count == 28
out = xr.concat(
[ds1, ds2.compute(), ds3],
dim="n",
data_vars="all",
coords="all",
compat="identical",
)
# no extra computes
assert kernel_call_count == 30
# Finally, test that originals are unaltered
assert ds1["d"].data is d1
assert ds1["c"].data is c1
assert ds2["d"].data is d2
assert ds2["c"].data is c2
assert ds3["d"].data is d3
assert ds3["c"].data is c3
def test_groupby(self):
u = self.eager_array
v = self.lazy_array
expected = u.groupby("x").mean(...)
with raise_if_dask_computes():
actual = v.groupby("x").mean(...)
self.assertLazyAndAllClose(expected, actual)
def test_rolling(self):
u = self.eager_array
v = self.lazy_array
expected = u.rolling(x=2).mean()
with raise_if_dask_computes():
actual = v.rolling(x=2).mean()
self.assertLazyAndAllClose(expected, actual)
def test_groupby_first(self):
u = self.eager_array
v = self.lazy_array
for coords in [u.coords, v.coords]:
coords["ab"] = ("x", ["a", "a", "b", "b"])
with pytest.raises(NotImplementedError, match=r"dask"):
v.groupby("ab").first()
expected = u.groupby("ab").first()
with raise_if_dask_computes():
actual = v.groupby("ab").first(skipna=False)
self.assertLazyAndAllClose(expected, actual)
def test_reindex(self):
u = self.eager_array.assign_coords(y=range(6))
v = self.lazy_array.assign_coords(y=range(6))
for kwargs in [
{"x": [2, 3, 4]},
{"x": [1, 100, 2, 101, 3]},
{"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
]:
expected = u.reindex(**kwargs)
actual = v.reindex(**kwargs)
self.assertLazyAndAllClose(expected, actual)
def test_to_dataset_roundtrip(self):
u = self.eager_array
v = self.lazy_array
expected = u.assign_coords(x=u["x"])
self.assertLazyAndEqual(expected, v.to_dataset("x").to_array("x"))
def test_merge(self):
def duplicate_and_merge(array):
return xr.merge([array, array.rename("bar")]).to_array()
expected = duplicate_and_merge(self.eager_array)
actual = duplicate_and_merge(self.lazy_array)
self.assertLazyAndEqual(expected, actual)
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_where_dispatching(self):
a = np.arange(10)
b = a > 3
x = da.from_array(a, 5)
y = da.from_array(b, 5)
expected = DataArray(a).where(b)
self.assertLazyAndEqual(expected, DataArray(a).where(y))
self.assertLazyAndEqual(expected, DataArray(x).where(b))
self.assertLazyAndEqual(expected, DataArray(x).where(y))
def test_simultaneous_compute(self):
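# Loading the whole dataset should invoke the dask scheduler exactly once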
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
count = [0]
def counting_get(*args, **kwargs):
count[0] += 1
return dask.get(*args, **kwargs)
ds.load(scheduler=counting_get)
assert count[0] == 1
def test_stack(self):
data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))
arr = DataArray(data, dims=("w", "x", "y"))
stacked = arr.stack(z=("x", "y"))
z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"])
expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"])
assert stacked.data.chunks == expected.data.chunks
self.assertLazyAndEqual(expected, stacked)
def test_dot(self):
eager = self.eager_array.dot(self.eager_array[0])
lazy = self.lazy_array.dot(self.lazy_array[0])
self.assertLazyAndAllClose(eager, lazy)
@pytest.mark.skipif(LooseVersion(dask.__version__) >= "2.0", reason="no meta")
def test_dataarray_repr_legacy(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.DataArray 'data' (x: 1)>
{!r}
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x""".format(
data
)
)
assert expected == repr(a)
assert kernel_call_count == 0 # should not evaluate dask array
@pytest.mark.skipif(LooseVersion(dask.__version__) < "2.0", reason="needs meta")
def test_dataarray_repr(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.DataArray 'data' (x: 1)>
{!r}
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x""".format(
data
)
)
assert expected == repr(a)
assert kernel_call_count == 0 # should not evaluate dask array
@pytest.mark.skipif(LooseVersion(dask.__version__) < "2.0", reason="needs meta")
def test_dataset_repr(self):
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
expected = dedent(
"""\
<xarray.Dataset>
Dimensions: (x: 1)
Coordinates:
y (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>
Dimensions without coordinates: x
Data variables:
a (x) int64 dask.array<chunksize=(1,), meta=np.ndarray>"""
)
assert expected == repr(ds)
assert kernel_call_count == 0 # should not evaluate dask array
def test_dataarray_pickle(self):
# Test that pickling/unpickling converts the dask backend
# to numpy in neither the data variable nor the non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a1 = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
a1.compute()
assert not a1._in_memory
assert not a1.coords["y"]._in_memory
assert kernel_call_count == 2
a2 = pickle.loads(pickle.dumps(a1))
assert kernel_call_count == 2
assert_identical(a1, a2)
assert not a1._in_memory
assert not a2._in_memory
assert not a1.coords["y"]._in_memory
assert not a2.coords["y"]._in_memory
def test_dataset_pickle(self):
# Test that pickling/unpickling converts the dask backend
# to numpy in neither the data variables nor the non-index coords
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds1 = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
ds1.compute()
assert not ds1["a"]._in_memory
assert not ds1["y"]._in_memory
assert kernel_call_count == 2
ds2 = pickle.loads(pickle.dumps(ds1))
assert kernel_call_count == 2
assert_identical(ds1, ds2)
assert not ds1["a"]._in_memory
assert not ds2["a"]._in_memory
assert not ds1["y"]._in_memory
assert not ds2["y"]._in_memory
def test_dataarray_getattr(self):
# ipython/jupyter does a long list of getattr() calls when trying to
# represent an object.
# Make sure we're not accidentally computing dask variables.
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)})
with suppress(AttributeError):
getattr(a, "NOTEXIST")
assert kernel_call_count == 0
def test_dataset_getattr(self):
# Same as test_dataarray_getattr: repr-style getattr() probes must not
# accidentally compute dask variables.
data = build_dask_array("data")
nonindex_coord = build_dask_array("coord")
ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)})
with suppress(AttributeError):
getattr(ds, "NOTEXIST")
assert kernel_call_count == 0
def test_values(self):
# Test that invoking the values property does not convert the dask
# backend to numpy
a = DataArray([1, 2]).chunk()
assert not a._in_memory
assert a.values.tolist() == [1, 2]
assert not a._in_memory
def test_from_dask_variable(self):
# Test array creation from Variable with dask backend.
# This is used e.g. in broadcast()
a = DataArray(self.lazy_array.variable, coords={"x": range(4)}, name="foo")
self.assertLazyAndIdentical(self.lazy_array, a)
@requires_pint_0_15(reason="Need __dask_tokenize__")
def test_tokenize_duck_dask_array(self):
import pint
unit_registry = pint.UnitRegistry()
q = unit_registry.Quantity(self.data, unit_registry.meter)
data_array = xr.DataArray(
data=q, coords={"x": range(4)}, dims=("x", "y"), name="foo"
)
token = dask.base.tokenize(data_array)
post_op = data_array + 5 * unit_registry.meter
assert dask.base.tokenize(data_array) != dask.base.tokenize(post_op)
# Immutability check
assert dask.base.tokenize(data_array) == token
class TestToDaskDataFrame:
def test_to_dask_dataframe(self):
# Test conversion of Datasets to dask DataFrames
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset(
{"a": ("t", da.from_array(x, chunks=4)), "b": ("t", y), "t": ("t", t)}
)
expected_pd = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
# test if 1-D index is correctly set up
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
# test if we have dask dataframes
assert isinstance(actual, dd.DataFrame)
# use the .equals from pandas to check dataframes are equivalent
assert_frame_equal(expected.compute(), actual.compute())
# test if no index is given
expected = dd.from_pandas(expected_pd.reset_index(drop=False), chunksize=4)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_2D(self):
# Test if 2-D dataset is supplied
w = np.random.randn(2, 3)
ds = Dataset({"w": (("x", "y"), da.from_array(w, chunks=(1, 2)))})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
# dask dataframes do not (yet) support a MultiIndex,
# but when they do, this would be the expected index:
exp_index = pd.MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
)
expected = pd.DataFrame({"w": w.reshape(-1)}, index=exp_index)
# so for now, reset the index
expected = expected.reset_index(drop=False)
actual = ds.to_dask_dataframe(set_index=False)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
@pytest.mark.xfail(raises=NotImplementedError)
def test_to_dask_dataframe_2D_set_index(self):
# This will fail until dask implements MultiIndex support
w = da.from_array(np.random.randn(2, 3), chunks=(1, 2))
ds = Dataset({"w": (("x", "y"), w)})
ds["x"] = ("x", np.array([0, 1], np.int64))
ds["y"] = ("y", list("abc"))
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_coordinates(self):
# Test if coordinate is also a dask array
x = np.random.randn(10)
t = np.arange(10) * 2
ds = Dataset(
{
"a": ("t", da.from_array(x, chunks=4)),
"t": ("t", da.from_array(t, chunks=4)),
}
)
expected_pd = pd.DataFrame({"a": x}, index=pd.Index(t, name="t"))
expected = dd.from_pandas(expected_pd, chunksize=4)
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected.compute(), actual.compute())
def test_to_dask_dataframe_not_daskarray(self):
# Test if DataArray is not a dask array
x = np.random.randn(10)
y = np.arange(10, dtype="uint8")
t = list("abcdefghij")
ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
expected = pd.DataFrame({"a": x, "b": y}, index=pd.Index(t, name="t"))
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_no_coordinate(self):
x = da.from_array(np.random.randn(10), chunks=4)
ds = Dataset({"x": ("dim_0", x)})
expected = ds.compute().to_dataframe().reset_index()
actual = ds.to_dask_dataframe()
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds.compute().to_dataframe()
actual = ds.to_dask_dataframe(set_index=True)
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
def test_to_dask_dataframe_dim_order(self):
values = np.array([[1, 2], [3, 4]], dtype=np.int64)
ds = Dataset({"w": (("x", "y"), values)}).chunk(1)
expected = ds["w"].to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["x", "y"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds["w"].T.to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["y", "x"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
with pytest.raises(ValueError, match=r"does not match the set of dimensions"):
ds.to_dask_dataframe(dim_order=["x"])
@pytest.mark.parametrize("method", ["load", "compute"])
def test_dask_kwargs_variable(method):
x = Variable("y", da.from_array(np.arange(3), chunks=(2,)))
# keyword args should be passed on to da.Array.compute()
with mock.patch.object(
da.Array, "compute", return_value=np.arange(3)
) as mock_compute:
getattr(x, method)(foo="bar")
mock_compute.assert_called_with(foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataarray(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = DataArray(data)
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataset(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = Dataset({"x": (("y"), data)})
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
kernel_call_count = 0
def kernel(name):
"""Dask kernel to test pickling/unpickling and __repr__.
Must be global to make it pickleable.
"""
global kernel_call_count
kernel_call_count += 1
return np.ones(1, dtype=np.int64)
def build_dask_array(name):
global kernel_call_count
kernel_call_count = 0
return dask.array.Array(
dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64
)
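# Hedged sketch (not part of the original suite; the name is ours): shows how
# the module-level counter distinguishes graph construction from evaluation.
def _demo_kernel_call_counting():
    arr = build_dask_array("demo")
    assert kernel_call_count == 0  # graph built, kernel not yet invoked
    arr.compute()
    assert kernel_call_count == 1  # one chunk, so exactly one kernel call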
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_Dataset(persist):
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
ds = ds + 1
n = len(ds.foo.data.dask)
ds2 = persist(ds)
assert len(ds2.foo.data.dask) == 1
assert len(ds.foo.data.dask) == n # doesn't mutate in place
@pytest.mark.parametrize(
"persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_DataArray(persist):
x = da.arange(10, chunks=(5,))
y = DataArray(x)
z = y + 1
n = len(z.data.dask)
zz = persist(z)
assert len(z.data.dask) == n
assert len(zz.data.dask) == zz.data.npartitions
def test_dataarray_with_dask_coords():
import toolz
x = xr.Variable("x", da.arange(8, chunks=(4,)))
y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2)
data = da.random.random((8, 8), chunks=(4, 4)) + 1
array = xr.DataArray(data, dims=["x", "y"])
array.coords["xx"] = x
array.coords["yy"] = y
assert dict(array.__dask_graph__()) == toolz.merge(
data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__()
)
(array2,) = dask.compute(array)
assert not dask.is_dask_collection(array2)
assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
def test_basic_compute():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2})
for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]:
with dask.config.set(scheduler=get):
ds.compute()
ds.foo.compute()
ds.foo.variable.compute()
def test_dask_layers_and_dependencies():
ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
x = dask.delayed(ds)
assert set(x.__dask_graph__().dependencies).issuperset(
ds.__dask_graph__().dependencies
)
assert set(x.foo.__dask_graph__().dependencies).issuperset(
ds.__dask_graph__().dependencies
)
def make_da():
da = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(100, 120)},
name="a",
).chunk({"x": 4, "y": 5})
da.x.attrs["long_name"] = "x"
da.attrs["test"] = "test"
da.coords["c2"] = 0.5
da.coords["ndcoord"] = da.x * 2
da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5})
return da
def make_ds():
map_ds = xr.Dataset()
map_ds["a"] = make_da()
map_ds["b"] = map_ds.a + 50
map_ds["c"] = map_ds.x + 20
map_ds = map_ds.chunk({"x": 4, "y": 5})
map_ds["d"] = ("z", [1, 1, 1, 1])
map_ds["z"] = [0, 1, 2, 3]
map_ds["e"] = map_ds.x + map_ds.y
map_ds.coords["c1"] = 0.5
map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x)))
map_ds.coords["cx"].attrs["test2"] = "test2"
map_ds.attrs["test"] = "test"
map_ds.coords["xx"] = map_ds["a"] * map_ds.y
map_ds.x.attrs["long_name"] = "x"
map_ds.y.attrs["long_name"] = "y"
return map_ds
# fixtures cannot be used in parametrize statements
# instead use this workaround
# https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
@pytest.fixture
def map_da():
return make_da()
@pytest.fixture
def map_ds():
return make_ds()
def test_unify_chunks(map_ds):
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
ds_copy.chunks
expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)}
with raise_if_dask_computes():
actual_chunks = ds_copy.unify_chunks().chunks
assert actual_chunks == expected_chunks
assert_identical(map_ds, ds_copy.unify_chunks())
out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy"))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == expected_chunks
# Test unordered dims
da = ds_copy["cxy"]
out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1}))
assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2))
# Test mismatch
with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"):
xr.unify_chunks(da, da.isel(x=slice(2)))
@pytest.mark.parametrize("obj", [make_ds(), make_da()])
@pytest.mark.parametrize(
"transform", [lambda x: x.compute(), lambda x: x.unify_chunks()]
)
def test_unify_chunks_shallow_copy(obj, transform):
obj = transform(obj)
unified = obj.unify_chunks()
assert_identical(obj, unified)
if obj.chunks:
    # unify_chunks should return a shallow copy, not the same object
    assert obj is not unified
@pytest.mark.parametrize("obj", [make_da()])
def test_auto_chunk_da(obj):
actual = obj.chunk("auto").data
expected = obj.data.rechunk("auto")
np.testing.assert_array_equal(actual, expected)
assert actual.chunks == expected.chunks
def test_map_blocks_error(map_da, map_ds):
def bad_func(darray):
return (darray * darray.x + 5 * darray.y)[:1, :1]
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(bad_func, map_da).compute()
def returns_numpy(darray):
return (darray * darray.x + 5 * darray.y).values
with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"):
xr.map_blocks(returns_numpy, map_da)
with pytest.raises(TypeError, match=r"args must be"):
xr.map_blocks(operator.add, map_da, args=10)
with pytest.raises(TypeError, match=r"kwargs must be"):
xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])
def really_bad_func(darray):
raise ValueError("couldn't do anything.")
with pytest.raises(Exception, match=r"Cannot infer"):
xr.map_blocks(really_bad_func, map_da)
ds_copy = map_ds.copy()
ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
with pytest.raises(ValueError, match=r"inconsistent chunks"):
xr.map_blocks(bad_func, ds_copy)
with pytest.raises(TypeError, match=r"Cannot pass dask collections"):
xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk()))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj)
expected = func(obj)
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_convert_args_to_list(obj):
expected = obj + 10
with raise_if_dask_computes():
actual = xr.map_blocks(operator.add, obj, [10])
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_dask_args():
da1 = xr.DataArray(
np.ones((10, 20)),
dims=["x", "y"],
coords={"x": np.arange(10), "y": np.arange(20)},
).chunk({"x": 5, "y": 4})
# check that block shapes are the same
def sumda(da1, da2):
assert da1.shape == da2.shape
return da1 + da2
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(sumda, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# one dimension in common
da2 = (da1 + 1).isel(x=1, drop=True)
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
# test that everything works when dimension names are different
da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"})
with raise_if_dask_computes():
mapped = xr.map_blocks(operator.add, da1, args=[da2])
xr.testing.assert_equal(da1 + da2, mapped)
with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"):
xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})])
with pytest.raises(ValueError, match=r"indexes along dimension 'x' are not equal"):
xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))])
# reduction
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2])
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
# reduction with template
da1 = da1.chunk({"x": -1})
da2 = da1 + 1
with raise_if_dask_computes():
mapped = xr.map_blocks(
lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x")
)
xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_add_attrs(obj):
def add_attrs(obj):
obj = obj.copy(deep=True)
obj.attrs["new"] = "new"
obj.cxy.attrs["new2"] = "new2"
return obj
expected = add_attrs(obj)
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj)
assert_identical(actual, expected)
# when template is specified, attrs are copied from template, not set by function
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj, template=obj)
assert_identical(actual, obj)
def test_map_blocks_change_name(map_da):
def change_name(obj):
obj = obj.copy(deep=True)
obj.name = "new"
return obj
expected = change_name(map_da)
with raise_if_dask_computes():
actual = xr.map_blocks(change_name, map_da)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_kwargs(obj):
expected = xr.full_like(obj, fill_value=np.nan)
with raise_if_dask_computes():
actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan))
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_to_array(map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(lambda x: x.to_array(), map_ds)
# to_array does not preserve name, so cannot use assert_identical
assert_equal(actual, map_ds.to_array())
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.to_dataset(),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)),
lambda x: x.astype(np.int32),
lambda x: x.x,
],
)
def test_map_blocks_da_transformations(func, map_da):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_da)
assert_identical(actual, func(map_da))
@pytest.mark.parametrize(
"func",
[
lambda x: x,
lambda x: x.drop_vars("cxy"),
lambda x: x.drop_vars("a"),
lambda x: x.drop_vars("x"),
lambda x: x.expand_dims(k=[1, 2, 3]),
lambda x: x.expand_dims(k=3),
lambda x: x.rename({"a": "new1", "b": "new2"}),
lambda x: x.x,
],
)
def test_map_blocks_ds_transformations(func, map_ds):
with raise_if_dask_computes():
actual = xr.map_blocks(func, map_ds)
assert_identical(actual, func(map_ds))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_da_ds_with_template(obj):
func = lambda x: x.isel(x=[1])
template = obj.isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj, template=template)
assert_identical(actual, template)
with raise_if_dask_computes():
actual = obj.map_blocks(func, template=template)
assert_identical(actual, template)
def test_map_blocks_template_convert_object():
da = make_da()
func = lambda x: x.to_dataset().isel(x=[1])
template = da.to_dataset().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, da, template=template)
assert_identical(actual, template)
ds = da.to_dataset()
func = lambda x: x.to_array().isel(x=[1])
template = ds.to_array().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, ds, template=template)
assert_identical(actual, template)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_errors_bad_template(obj):
with pytest.raises(ValueError, match=r"unexpected coordinate variables"):
xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"does not contain coordinate variables"):
xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Dimensions {'x'} missing"):
xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()
with pytest.raises(TypeError, match=r"must be a DataArray"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute()
with pytest.raises(ValueError, match=r"map_blocks requires that one block"):
xr.map_blocks(
lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1])
).compute()
with pytest.raises(ValueError, match=r"Expected index 'x' to be"):
xr.map_blocks(
lambda a: a.isel(x=[1]).assign_coords(x=[120]), # assign bad index values
obj,
template=obj.isel(x=[1, 5, 9]),
).compute()
def test_map_blocks_errors_bad_template_2(map_ds):
with pytest.raises(ValueError, match=r"unexpected data variables {'xyz'}"):
xr.map_blocks(lambda x: x.assign(xyz=1), map_ds, template=map_ds).compute()
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_object_method(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
expected = xr.map_blocks(func, obj)
actual = obj.map_blocks(func)
assert_identical(expected, actual)
def test_map_blocks_hlg_layers():
# regression test for #3599
ds = xr.Dataset(
{
"x": (("a",), dask.array.ones(10, chunks=(5,))),
"z": (("b",), dask.array.ones(10, chunks=(5,))),
}
)
mapped = ds.map_blocks(lambda x: x)
xr.testing.assert_equal(mapped, ds)
def test_make_meta(map_ds):
from ..core.parallel import make_meta
meta = make_meta(map_ds)
for variable in map_ds._coord_names:
assert variable in meta._coord_names
assert meta.coords[variable].shape == (0,) * meta.coords[variable].ndim
for variable in map_ds.data_vars:
assert variable in meta.data_vars
assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim
def test_identical_coords_no_computes():
lons2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
a = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
b = xr.DataArray(
da.zeros((10, 10), chunks=2), dims=("y", "x"), coords={"lons": lons2}
)
with raise_if_dask_computes():
c = a + b
assert_identical(c, a)
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
@pytest.mark.parametrize(
"transform",
[
lambda x: x.reset_coords(),
lambda x: x.reset_coords(drop=True),
lambda x: x.isel(x=1),
lambda x: x.attrs.update(new_attrs=1),
lambda x: x.assign_coords(cxy=1),
lambda x: x.rename({"x": "xnew"}),
lambda x: x.rename({"cxy": "cxynew"}),
],
)
def test_token_changes_on_transform(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj))
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
def test_token_changes_when_data_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
# Change data_var
if isinstance(obj, DataArray):
obj *= 2
else:
obj["a"] *= 2
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
# Change non-index coord
obj.coords["ndcoord"] *= 2
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
# Change IndexVariable
obj = obj.assign_coords(x=obj.x * 2)
with raise_if_dask_computes():
t4 = dask.base.tokenize(obj)
assert t4 != t3
@pytest.mark.parametrize("obj", [make_da().compute(), make_ds().compute()])
def test_token_changes_when_buffer_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)
if isinstance(obj, DataArray):
obj[0, 0] = 123
else:
obj["a"][0, 0] = 123
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1
obj.coords["ndcoord"][0] = 123
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2
@pytest.mark.parametrize(
"transform",
[lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
@pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]])
def test_token_identical(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))
assert dask.base.tokenize(obj.compute()) == dask.base.tokenize(
transform(obj.compute())
)
def test_recursive_token():
"""Test that tokenization is invoked recursively, and doesn't just rely on the
output of str()
"""
a = np.ones(10000)
b = np.ones(10000)
b[5000] = 2
assert str(a) == str(b)
assert dask.base.tokenize(a) != dask.base.tokenize(b)
# Test DataArray and Variable
da_a = DataArray(a)
da_b = DataArray(b)
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
# Test Dataset
ds_a = da_a.to_dataset(name="x")
ds_b = da_b.to_dataset(name="x")
assert dask.base.tokenize(ds_a) != dask.base.tokenize(ds_b)
# Test IndexVariable
da_a = DataArray(a, dims=["x"], coords={"x": a})
da_b = DataArray(a, dims=["x"], coords={"x": b})
assert dask.base.tokenize(da_a) != dask.base.tokenize(da_b)
@requires_scipy_or_netCDF4
def test_normalize_token_with_backend(map_ds):
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
map_ds.to_netcdf(tmp_file)
read = xr.open_dataset(tmp_file)
assert not dask.base.tokenize(map_ds) == dask.base.tokenize(read)
read.close()
@pytest.mark.parametrize(
"compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_variables(compat):
var1 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
var2 = xr.Variable(("y", "x"), da.zeros((10, 10), chunks=2))
var3 = xr.Variable(("y", "x"), da.zeros((20, 10), chunks=2))
with raise_if_dask_computes():
assert getattr(var1, compat)(var2, equiv=lazy_array_equiv)
# values are actually equal, but we don't know that until we compute, so return None
with raise_if_dask_computes():
assert getattr(var1, compat)(var2 / 2, equiv=lazy_array_equiv) is None
# shapes are not equal, return False without computes
with raise_if_dask_computes():
assert getattr(var1, compat)(var3, equiv=lazy_array_equiv) is False
# if one or both arrays are numpy, return None
assert getattr(var1, compat)(var2.compute(), equiv=lazy_array_equiv) is None
assert (
getattr(var1.compute(), compat)(var2.compute(), equiv=lazy_array_equiv) is None
)
with raise_if_dask_computes():
assert getattr(var1, compat)(var2.transpose("y", "x"))
@pytest.mark.parametrize(
"compat", ["broadcast_equals", "equals", "identical", "no_conflicts"]
)
def test_lazy_array_equiv_merge(compat):
da1 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
da2 = xr.DataArray(da.zeros((10, 10), chunks=2), dims=("y", "x"))
da3 = xr.DataArray(da.ones((20, 10), chunks=2), dims=("y", "x"))
with raise_if_dask_computes():
xr.merge([da1, da2], compat=compat)
# shapes are not equal; no computes necessary
with raise_if_dask_computes(max_computes=0):
with pytest.raises(ValueError):
xr.merge([da1, da3], compat=compat)
with raise_if_dask_computes(max_computes=2):
xr.merge([da1, da2 / 2], compat=compat)
@pytest.mark.filterwarnings("ignore::FutureWarning") # transpose_coords
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
@pytest.mark.parametrize(
"transform",
[
lambda a: a.assign_attrs(new_attr="anew"),
lambda a: a.assign_coords(cxy=a.cxy),
lambda a: a.copy(),
lambda a: a.isel(x=np.arange(a.sizes["x"])),
lambda a: a.isel(x=slice(None)),
lambda a: a.loc[dict(x=slice(None))],
lambda a: a.loc[dict(x=np.arange(a.sizes["x"]))],
lambda a: a.loc[dict(x=a.x)],
lambda a: a.sel(x=a.x),
lambda a: a.sel(x=a.x.values),
lambda a: a.transpose(...),
lambda a: a.squeeze(), # no dimensions to squeeze
lambda a: a.sortby("x"), # "x" is already sorted
lambda a: a.reindex(x=a.x),
lambda a: a.reindex_like(a),
lambda a: a.rename({"cxy": "cnew"}).rename({"cnew": "cxy"}),
lambda a: a.pipe(lambda x: x),
lambda a: xr.align(a, xr.zeros_like(a))[0],
# assign
# swap_dims
# set_index / reset_index
],
)
def test_transforms_pass_lazy_array_equiv(obj, transform):
with raise_if_dask_computes():
assert_equal(obj, transform(obj))
def test_more_transforms_pass_lazy_array_equiv(map_da, map_ds):
with raise_if_dask_computes():
assert_equal(map_ds.cxy.broadcast_like(map_ds.cxy), map_ds.cxy)
assert_equal(xr.broadcast(map_ds.cxy, map_ds.cxy)[0], map_ds.cxy)
assert_equal(map_ds.map(lambda x: x), map_ds)
assert_equal(map_ds.set_coords("a").reset_coords("a"), map_ds)
assert_equal(map_ds.update({"a": map_ds.a}), map_ds)
# fails because of index error
# assert_equal(
# map_ds.rename_dims({"x": "xnew"}).rename_dims({"xnew": "x"}), map_ds
# )
assert_equal(
map_ds.rename_vars({"cxy": "cnew"}).rename_vars({"cnew": "cxy"}), map_ds
)
assert_equal(map_da._from_temp_dataset(map_da._to_temp_dataset()), map_da)
assert_equal(map_da.astype(map_da.dtype), map_da)
assert_equal(map_da.transpose("y", "x", transpose_coords=False).cxy, map_da.cxy)
def test_optimize():
# https://github.com/pydata/xarray/issues/3698
a = dask.array.ones((10, 4), chunks=(5, 2))
arr = xr.DataArray(a).chunk(5)
(arr2,) = dask.optimize(arr)
arr2.compute()
# The graph_manipulation module has been in dask since 2021.2, but it became
# usable with xarray only in 2021.3
@pytest.mark.skipif(LooseVersion(dask.__version__) <= "2021.02.0", reason="new module")
def test_graph_manipulation():
"""dask.graph_manipulation passes an optional parameter, "rename", to the rebuilder
function returned by __dask_postpersist__; also, the dsk passed to the rebuilder is
a HighLevelGraph whereas with dask.persist() and dask.optimize() it's a plain dict.
"""
import dask.graph_manipulation as gm
v = Variable(["x"], [1, 2]).chunk(-1).chunk(1) * 2
da = DataArray(v)
ds = Dataset({"d1": v[0], "d2": v[1], "d3": ("x", [3, 4])})
v2, da2, ds2 = gm.clone(v, da, ds)
assert_equal(v2, v)
assert_equal(da2, da)
assert_equal(ds2, ds)
for a, b in ((v, v2), (da, da2), (ds, ds2)):
assert a.__dask_layers__() != b.__dask_layers__()
assert len(a.__dask_layers__()) == len(b.__dask_layers__())
assert a.__dask_graph__().keys() != b.__dask_graph__().keys()
assert len(a.__dask_graph__()) == len(b.__dask_graph__())
assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()
assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)
# Above we performed a slice operation; adding the two slices back together creates
# a diamond-shaped dependency graph, which in turn will trigger a collision in layer
# names if we were to use HighLevelGraph.cull() instead of
# HighLevelGraph.cull_layers() in Dataset.__dask_postpersist__().
assert_equal(ds2.d1 + ds2.d2, ds.d1 + ds.d2)
| apache-2.0 |
belltailjp/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56311 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
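# Hedged sketch (not part of the original module; the name is ours):
# illustrates the contract of _generate_hypercube, i.e. `samples` distinct
# binary rows of length `dimensions`.
def _demo_generate_hypercube():
    rng = check_random_state(0)
    out = _generate_hypercube(8, 4, rng)
    assert out.shape == (8, 4)
    assert len(set(map(tuple, out))) == 8  # all rows are distinct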
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
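# Hedged usage sketch (illustrative, not from the source; the name is ours):
# the basic shape and label contract of make_classification.
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                               random_state=0)
    assert X.shape == (200, 10)
    assert y.shape == (200,)
    assert set(np.unique(y)) == {0, 1}  # default n_classes=2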
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
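# Hedged usage sketch (illustrative, not from the source; the name is ours):
# with return_indicator=True the labels come back as a binary indicator matrix.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=5, n_features=8,
                                          n_classes=3, return_indicator=True,
                                          random_state=0)
    assert X.shape == (5, 8)
    assert Y.shape == (5, 3)
    assert set(np.unique(Y)) <= {0, 1}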
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
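# Hedged sketch (not from the source; the name is ours): since no noise is
# added to y, the labels reproduce the docstring's thresholding rule exactly.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert np.all((np.sum(X ** 2, axis=1) > 9.34) == (y == 1.0))
    assert set(np.unique(y)) <= {-1.0, 1.0}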
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
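# Hedged sketch (not from the source; the name is ours): with noise=0.0 and
# coef=True the output is an exact linear function of the returned inputs,
# even after shuffling, because the coefficients are permuted consistently.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=50, n_features=5, n_informative=2,
                                 bias=3.0, noise=0.0, coef=True,
                                 random_state=0)
    assert np.allclose(y, np.dot(X, coef) + 3.0)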
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
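# Hedged sketch (not from the source; the name is ours): with noise=None the
# two classes lie on exact circles of radius 1.0 (label 0) and `factor`
# (label 1).
def _demo_make_circles():
    X, y = make_circles(n_samples=100, noise=None, factor=0.5, random_state=0)
    radii = np.sqrt((X ** 2).sum(axis=1))
    assert np.allclose(radii[y == 0], 1.0)
    assert np.allclose(radii[y == 1], 0.5)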
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
# labels must follow the stacking order above: outer arc first, inner second
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
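# Hedged sketch (not from the source; the name is ours): for an even
# n_samples the two interleaving arcs receive exactly half of the points each.
def _demo_make_moons():
    X, y = make_moons(n_samples=100, noise=None, random_state=0)
    assert X.shape == (100, 2)
    assert (y == 0).sum() == (y == 1).sum() == 50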
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
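# Hedged sketch (not from the source; the name is ours): with noise=0.0 the
# output reproduces the docstring formula and ignores features beyond the
# first five.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=20, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)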
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
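# Hedged sketch (not from the source; the name is ours): the noiseless output
# matches the docstring formula for Friedman #2.
def _demo_make_friedman2():
    X, y = make_friedman2(n_samples=20, noise=0.0, random_state=0)
    expected = (X[:, 0] ** 2
                + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5
    assert np.allclose(y, expected)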
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
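# Illustrative sketch: the same integer random_state yields identical data
# across calls, which is what makes these generators usable in tests.
def _demo_friedman3():
    X1, y1 = make_friedman3(n_samples=30, noise=0.1, random_state=42)
    X2, y2 = make_friedman3(n_samples=30, noise=0.1, random_state=42)
    assert np.array_equal(X1, X2) and np.array_equal(y1, y2)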
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular value profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
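# Illustrative sketch: most of the spectral energy should be concentrated in
# roughly the first `effective_rank` singular values. The 90% threshold below
# is an arbitrary choice for demonstration, not a library guarantee.
def _demo_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    energy = np.cumsum(s ** 2) / np.sum(s ** 2)
    assert energy[9] > 0.9  # the first ~2 * effective_rank values dominate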
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components : int
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return tuple(map(np.squeeze, (Y, D, X)))
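# Illustrative sketch: each column of the returned code has exactly
# n_nonzero_coefs non-zero entries, the dictionary columns are unit-norm,
# and the signal factorizes as Y = D X.
def _demo_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=15, n_components=20,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.all((X != 0).sum(axis=0) == 3)
    assert np.allclose(Y, np.dot(D, X))
    assert np.allclose(np.sum(D ** 2, axis=0), 1.0)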
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
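# Illustrative sketch: only the first four features carry signal, so their
# absolute correlation with y should dominate that of the remaining noise
# features on a reasonably large sample. The comparison is demonstrative,
# not a library guarantee.
def _demo_sparse_uncorrelated():
    X, y = make_sparse_uncorrelated(n_samples=5000, n_features=10,
                                    random_state=0)
    corr = np.array([abs(np.corrcoef(X[:, j], y)[0, 1]) for j in range(10)])
    assert corr[:4].min() > corr[4:].max()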
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
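# Illustrative sketch: the result is symmetric and positive definite, so a
# Cholesky factorization must succeed and all eigenvalues are positive.
def _demo_spd_matrix():
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(X, X.T)
    np.linalg.cholesky(X)  # raises LinAlgError if X is not positive definite
    assert np.all(np.linalg.eigvalsh(X) > 0)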
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : int, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute rows and columns symmetrically: we don't want to have
# asymmetries in the final SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
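# Illustrative sketch: a larger alpha means more zeros, and the matrix stays
# symmetric positive definite. The sparsity bound below is demonstrative for
# this particular seed, not a guarantee.
def _demo_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
    assert np.allclose(prec, prec.T)
    np.linalg.cholesky(prec)  # fails if prec is not positive definite
    assert np.mean(prec == 0) > 0.5  # most entries are exact zeros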
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
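# Illustrative sketch: with noise=0.0 the three coordinates are exact
# functions of the univariate position t returned alongside the points.
def _demo_swiss_roll():
    X, t = make_swiss_roll(n_samples=25, noise=0.0, random_state=0)
    assert X.shape == (25, 3) and t.shape == (25,)
    assert np.allclose(X[:, 0], t * np.cos(t))
    assert np.allclose(X[:, 2], t * np.sin(t))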
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the identity matrix. This
dataset only produces isotropic (spherically symmetric) normal
distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
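# Illustrative sketch: classes are defined by quantiles of the squared
# distance from the mean, so when n_samples is divisible by n_classes each
# class receives an equal share of the points.
def _demo_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert np.array_equal(np.bincount(y), np.array([30, 30, 30]))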
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
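# Illustrative sketch: `rows` and `cols` are boolean indicator matrices with
# one row per bicluster; with noise=0.0 every bicluster block is constant.
def _demo_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.0, shuffle=False,
                                       random_state=0)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    block = data[rows[0]][:, cols[0]]
    assert np.allclose(block, block[0, 0])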
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
return result, rows, cols
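# Illustrative sketch: n_clusters may be a (n_row_clusters, n_column_clusters)
# pair, in which case there is one indicator row per (row, column) block.
def _demo_checkerboard():
    data, rows, cols = make_checkerboard(shape=(20, 20), n_clusters=(4, 3),
                                         noise=0.0, shuffle=False,
                                         random_state=0)
    assert data.shape == (20, 20)
    assert rows.shape == (4 * 3, 20) and cols.shape == (4 * 3, 20)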
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/tree/tests/test_tree.py | 72 | 47440 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, return_indicator=True, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: unequal number of nodes ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": unequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": unequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": unequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": unequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": unequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": unequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": unequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(clf.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
# Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Subsample the larger datasets to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to the numerical instability of MSE and overly strict tests, we
# limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
# Set n_samples equal to n_features to ease the simultaneous
# construction of the csr and csc matrices
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
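# Illustrative sketch (not part of the test suite): the check above builds the
# matrix from a raw (data, indices, indptr) triplet because that preserves
# explicitly stored zeros, whereas converting from a dense array drops them.
def _example_explicit_zeros():
    data = np.array([0., 1., 0., 2.])  # two explicit zeros
    indices = np.array([0, 1, 0, 1])
    indptr = np.array([0, 2, 4])
    M = csc_matrix((data, indices, indptr), shape=(2, 2))
    assert (M.data == 0.).sum() == 2  # the zeros are stored explicitly
    assert (csc_matrix(M.toarray()).data == 0.).sum() == 0  # round trip drops them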
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
nvoron23/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 56 | 37976 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on the boston house prices dataset with least
# squares, least absolute deviation, and huber losses.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg="pred_%d doesn't match last pred_%d for loss %r and subsample %r. "
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
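# Illustrative sketch (not a test): staged_predict makes it cheap to pick the
# stage with the lowest held-out error without refitting. Assumes a fitted
# GradientBoostingRegressor ``model`` and a held-out split (X_val, y_val).
def _example_best_stage(model, X_val, y_val):
    errors = [mean_squared_error(y_val, y_pred)
              for y_pred in model.staged_predict(X_val)]
    return np.argmin(errors) + 1  # best number of boosting stages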
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
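# Illustrative sketch (not a test): oob_improvement_ supports a simple
# heuristic for choosing n_estimators -- stop where the cumulative OOB
# improvement peaks. Assumes a classifier fitted with subsample < 1.0.
def _example_oob_best_n_estimators(model):
    return np.argmax(np.cumsum(model.oob_improvement_)) + 1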
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one line per iteration for iterations 1-10, then one every tenth iteration for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
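# A hedged variant (illustrative, not used by the tests below): a monitor can
# also stop on a stalled training loss instead of a fixed iteration count.
def _loss_plateau_monitor(i, est, locals):
    """Stop once the training loss improves by less than 1e-4."""
    return i > 0 and est.train_score_[i - 1] - est.train_score_[i] < 1e-4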
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
Sentient07/scikit-learn | sklearn/ensemble/forest.py | 8 | 67993 | """Forest of trees-based ensemble methods
These methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
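# Illustrative sketch (not library code): for a fixed seed, the unsampled
# (out-of-bag) indices are exactly the complement of the bootstrap draw.
def _example_oob_complement(n_samples=10):
    in_bag = _generate_sample_indices(0, n_samples)
    oob = _generate_unsampled_indices(0, n_samples)
    assert set(in_bag).isdisjoint(oob)
    assert set(in_bag) | set(oob) == set(range(n_samples))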
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
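# Illustrative sketch (not library code): bootstrapping above is emulated by
# weighting rather than by duplicating rows -- a sample drawn k times receives
# weight k and an out-of-bag sample weight 0, which is equivalent for tree
# fitting and avoids copying X.
def _example_bootstrap_weights(n_samples=5):
    indices = _generate_sample_indices(42, n_samples)
    weights = bincount(indices, minlength=n_samples).astype(np.float64)
    assert weights.sum() == n_samples  # total weight equals the number of draws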
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
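    # Usage sketch (comments only; assumes a fitted forest ``clf`` and a
    # 2-D ``X``):
    #   leaves = clf.apply(X)  # shape (n_samples, n_estimators)
    # leaves[i, j] is the leaf reached by sample i in tree j -- the raw
    # material for tree-embedding features (cf. RandomTreesEmbedding).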
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
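    # Usage sketch (comments only; assumes a fitted forest ``clf``):
    #   indicator, n_nodes_ptr = clf.decision_path(X)
    #   block = indicator[:, n_nodes_ptr[i]:n_nodes_ptr[i + 1]]  # tree i
    # block[j] flags every node that sample j traverses in the i-th tree.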
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, unlike
# [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger than or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
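    # Usage sketch (comments only; assumes a fitted single-output classifier
    # ``clf``): the averaged probabilities reproduce predict() via
    #   clf.classes_.take(clf.predict_proba(X).argmax(axis=1), axis=0)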
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
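# Illustrative sketch (not library code): for a single output the OOB score
# computed above reduces to an R^2 on the stored OOB predictions. Assumes a
# regressor fitted with bootstrap=True and oob_score=True.
def _example_oob_r2(est, y):
    return r2_score(y, est.oob_prediction_)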
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
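Examples
--------
A minimal usage sketch; the data below are synthetic and purely
illustrative:
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
>>> clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :]).shape
(2,)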
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of decision tree
regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
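Examples
--------
A minimal usage sketch; the data below are synthetic and purely
illustrative:
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
>>> regr = RandomForestRegressor(n_estimators=10, random_state=0).fit(X, y)
>>> regr.predict(X[:2, :]).shape
(2,)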
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble classifier based on trees with optimal
splits.
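Examples
--------
A minimal usage sketch; the data below are synthetic and purely
illustrative:
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
>>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :]).shape
(2,)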
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor : Base estimator for this ensemble.
RandomForestRegressor : Ensemble regressor using trees with optimal splits.
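Examples
--------
A minimal usage sketch; the data below are synthetic and purely
illustrative:
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
>>> regr = ExtraTreesRegressor(n_estimators=10, random_state=0).fit(X, y)
>>> regr.predict(X[:2, :]).shape
(2,)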
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
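Examples
--------
A minimal usage sketch; the toy data below are purely illustrative:
>>> import numpy as np
>>> X = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]])
>>> hasher = RandomTreesEmbedding(n_estimators=5, random_state=0)
>>> X_transformed = hasher.fit_transform(X)
>>> X_transformed.shape[0]  # one row per sample; columns one-hot encode leaves
4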
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
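# Fit the forest on uniformly distributed random regression targets:
# the tree structure then encodes no supervised signal, so the
# resulting leaf-index embedding is fully unsupervised.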
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
alperyeg/elephant | elephant/current_source_density_src/icsd.py | 9 | 35175 | # -*- coding: utf-8 -*-
'''
py-iCSD toolbox!
Translation of the core functionality of the CSDplotter MATLAB package
to python.
The methods were originally developed by Klas H. Pettersen, as described in:
Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute T. Einevoll,
Current-source density estimation based on inversion of electrostatic forward
solution: Effects of finite extent of neuronal activity and conductivity
discontinuities, Journal of Neuroscience Methods, Volume 154, Issues 1-2,
30 June 2006, Pages 116-133, ISSN 0165-0270,
http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
The methods themselves are implemented as callable subclasses of the base
CSD class object, which sets some common attributes,
and a basic function for calculating the iCSD, and a generic spatial filter
implementation.
The raw- and filtered CSD estimates are returned as Quantity arrays.
Requires pylab environment to work, i.e. numpy+scipy+matplotlib, with the
addition of quantities (http://pythonhosted.org/quantities) and
neo (https://pythonhosted.org/neo).
Original implementation from CSDplotter-0.1.1
(http://software.incf.org/software/csdplotter) by Klas. H. Pettersen 2005.
Written by:
- Espen.Hagen@umb.no, 2010,
- e.hagen@fz-juelich.de, 2015-2016
'''
import numpy as np
import scipy.integrate as si
import scipy.signal as ss
import quantities as pq
class CSD(object):
'''Base iCSD class'''
def __init__(self, lfp, f_type='gaussian', f_order=(3, 1)):
'''Initialize parent class iCSD
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps)
f_type : str
type of spatial filter, must be a scipy.signal filter design method
f_order : list
settings for spatial filter, arg passed to filter design function
'''
self.name = 'CSD estimate parent class'
self.lfp = lfp
self.f_matrix = np.eye(lfp.shape[0]) * pq.m**3 / pq.S
self.f_type = f_type
self.f_order = f_order
def get_csd(self, ):
'''
Perform the CSD estimate from the LFP and forward matrix F, i.e as
CSD=F**-1*LFP
Arguments
---------
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.linalg.solve(self.f_matrix, self.lfp)
return csd * (self.f_matrix.units**-1 * self.lfp.units).simplified
def filter_csd(self, csd, filterfunction='convolve'):
'''
Spatial filtering of the CSD estimate, using an N-point filter
Arguments
---------
csd : np.ndarrray * quantity.Quantity
Array with the csd estimate
filterfunction : str
'filtfilt' or 'convolve'. Apply spatial filter using
scipy.signal.filtfilt or scipy.signal.convolve.
'''
if self.f_type == 'gaussian':
try:
assert(len(self.f_order) == 2)
except AssertionError:
raise ValueError('filter order f_order must be a tuple of length 2')
else:
try:
assert(self.f_order > 0 and isinstance(self.f_order, int))
except AssertionError:
raise ValueError('Filter order must be int > 0!')
try:
assert(filterfunction in ['filtfilt', 'convolve'])
except AssertionError:
raise ValueError("{} not equal to 'filtfilt' or \
'convolve'".format(filterfunction))
if self.f_type == 'boxcar':
num = ss.boxcar(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'hamming':
num = ss.hamming(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'triangular':
num = ss.triang(self.f_order)
denom = np.array([num.sum()])
elif self.f_type == 'gaussian':
num = ss.gaussian(self.f_order[0], self.f_order[1])
denom = np.array([num.sum()])
elif self.f_type == 'identity':
num = np.array([1.])
denom = np.array([1.])
else:
raise ValueError('%s Wrong filter type!' % self.f_type)
num_string = '[ '
for i in num:
num_string = num_string + '%.3f ' % i
num_string = num_string + ']'
denom_string = '[ '
for i in denom:
denom_string = denom_string + '%.3f ' % i
denom_string = denom_string + ']'
print(('discrete filter coefficients: \nb = {}, \
\na = {}'.format(num_string, denom_string)))
if filterfunction == 'filtfilt':
return ss.filtfilt(num, denom, csd, axis=0) * csd.units
elif filterfunction == 'convolve':
csdf = csd / csd.units
for i in range(csdf.shape[1]):
csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
return csdf * csd.units
class StandardCSD(CSD):
'''
Standard CSD method with and without Vaknin electrodes
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize standard CSD method class with & without Vaknin electrodes.
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m, must be monotonically increasing
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S/m
vaknin_el : bool
flag for using method of Vaknin to endpoint electrodes
Defaults to True
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
diff_diff_coord = np.diff(np.diff(coord_electrode)).magnitude
zeros_ddc = np.zeros_like(diff_diff_coord)
try:
assert(np.all(np.isclose(diff_diff_coord, zeros_ddc, atol=1e-12)))
except AssertionError as ae:
print('coord_electrode not evenly spaced')
raise ae
if self.vaknin_el:
# extend lfps array by duplicating potential at endpoint contacts
if lfp.ndim == 1:
self.lfp = np.empty((lfp.shape[0] + 2, )) * lfp.units
else:
self.lfp = np.empty((lfp.shape[0] + 2, lfp.shape[1])) * lfp.units
self.lfp[0, ] = lfp[0, ]
self.lfp[1:-1, ] = lfp
self.lfp[-1, ] = lfp[-1, ]
else:
self.lfp = lfp
self.name = 'Standard CSD method'
self.coord_electrode = coord_electrode
self.f_inv_matrix = self.get_f_inv_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.vaknin_el = kwargs.pop('vaknin_el', True)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_inv_matrix(self):
'''Calculate the inverse F-matrix for the standard CSD method'''
h_val = abs(np.diff(self.coord_electrode)[0])
f_inv = -np.eye(self.lfp.shape[0])
# Inner matrix elements is just the discrete laplacian coefficients
for j in range(1, f_inv.shape[0] - 1):
f_inv[j, j - 1: j + 2] = np.array([1., -2., 1.])
return f_inv * -self.sigma / h_val
def get_csd(self):
'''
Perform the iCSD calculation, i.e: iCSD=F_inv*LFP
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with the csd estimate
'''
csd = np.dot(self.f_inv_matrix, self.lfp)[1:-1, ]
# `np.dot()` does not return correct units, so the units of `csd` must
# be assigned manually
csd_units = (self.f_inv_matrix.units * self.lfp.units).simplified
csd = csd.magnitude * csd_units
return csd
class DeltaiCSD(CSD):
'''
delta-iCSD method
'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initialize the delta-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
diameter of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
print('values of coord_electrode not monotonically increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'delta-iCSD method'
self.coord_electrode = coord_electrode
# initialize F- and iCSD-matrices
self.f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size))
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix'''
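# Each F-matrix element combines the potential at contact j of a disk
# source at contact i with that of its mirror image above the tissue
# surface, weighted by (sigma - sigma_top) / (sigma + sigma_top)
# (method of images for the conductivity step at the surface).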
f_matrix = np.empty((self.coord_electrode.size,
self.coord_electrode.size)) * self.coord_electrode.units
for j in range(self.coord_electrode.size):
for i in range(self.coord_electrode.size):
f_matrix[j, i] = ((np.sqrt((self.coord_electrode[j] -
self.coord_electrode[i])**2 +
(self.diam[j] / 2)**2) - abs(self.coord_electrode[j] -
self.coord_electrode[i])) +
(self.sigma - self.sigma_top) / (self.sigma +
self.sigma_top) *
(np.sqrt((self.coord_electrode[j] +
self.coord_electrode[i])**2 + (self.diam[j] / 2)**2)-
abs(self.coord_electrode[j] + self.coord_electrode[i])))
f_matrix /= (2 * self.sigma)
return f_matrix
class StepiCSD(CSD):
'''step-iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing step-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float or np.ndarray * quantity.Quantity
diameter(s) of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
h : float or np.ndarray * quantity.Quantity
assumed thickness of the source cylinders at all or each contact
Defaults to np.ones(15) * 100E-6 * pq.m
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise ae
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
print('values of coord_electrode not monotonically increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
else:
assert(self.diam > 0 * self.diam.units)
except AssertionError as ae:
print('diam must be positive scalar or of same shape \
as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
try:
assert(self.h.size == 1 or self.h.size == coord_electrode.size)
if self.h.size == coord_electrode.size:
assert(np.all(self.h > 0 * self.h.units))
except AssertionError as ae:
print('h must be scalar or of same shape as coord_electrode')
raise ae
if self.h.size == 1:
self.h = np.ones(coord_electrode.size) * self.h
self.name = 'step-iCSD method'
self.coord_electrode = coord_electrode
# compute forward-solution matrix
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.h = kwargs.pop('h', np.ones(23) * 100E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate F-matrix for step iCSD method'''
el_len = self.coord_electrode.size
f_matrix = np.zeros((el_len, el_len))
for j in range(el_len):
for i in range(el_len):
lower_int = self.coord_electrode[i] - self.h[j] / 2
if lower_int < 0:
lower_int = 0 * self.h[j].units  # clip the integration bound at the surface
upper_int = self.coord_electrode[i] + self.h[j] / 2
# components of f_matrix object
f_cyl0 = si.quad(self._f_cylinder,
a=lower_int, b=upper_int,
args=(float(self.coord_electrode[j]),
float(self.diam[j]),
float(self.sigma)),
epsabs=self.tol)[0]
f_cyl1 = si.quad(self._f_cylinder, a=lower_int, b=upper_int,
args=(-float(self.coord_electrode[j]),
float(self.diam[j]), float(self.sigma)),
epsabs=self.tol)[0]
# method of images coefficient
mom = (self.sigma - self.sigma_top) / (self.sigma + self.sigma_top)
f_matrix[j, i] = f_cyl0 + mom * f_cyl1
# assume si.quad discards the units
return f_matrix * self.h.units**2 / self.sigma.units
def _f_cylinder(self, zeta, z_val, diam, sigma):
'''function used by class method'''
f_cyl = 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
return f_cyl
class SplineiCSD(CSD):
'''spline iCSD method'''
def __init__(self, lfp, coord_electrode, **kwargs):
'''
Initializing spline-iCSD method class object
Parameters
----------
lfp : np.ndarray * quantity.Quantity
LFP signal of shape (# channels, # time steps) in units of V
coord_electrode : np.ndarray * quantity.Quantity
depth of evenly spaced electrode contact points of shape
(# contacts, ) in units of m
diam : float * quantity.Quantity
diameter of the assumed circular planar current sources centered
at each contact
Defaults to 500E-6 meters
sigma : float * quantity.Quantity
conductivity of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
sigma_top : float * quantity.Quantity
conductivity on top of tissue in units of S/m or 1/(ohm*m)
Defaults to 0.3 S / m
tol : float
tolerance of numerical integration
Defaults 1e-6
f_type : str
type of spatial filter, must be a scipy.signal filter design method
Defaults to 'gaussian'
f_order : list
settings for spatial filter, arg passed to filter design function
Defaults to (3,1) for the gaussian
num_steps : int
number of data points for the spatially upsampled LFP/CSD data
Defaults to 200
'''
self.parameters(**kwargs)
CSD.__init__(self, lfp, self.f_type, self.f_order)
try: # Should the class not take care of this?!
assert(self.diam.units == coord_electrode.units)
except AssertionError as ae:
print('units of coord_electrode ({}) and diam ({}) differ'
.format(coord_electrode.units, self.diam.units))
raise
try:
assert(np.all(np.diff(coord_electrode) > 0))
except AssertionError as ae:
print('values of coord_electrode not monotonically increasing')
raise ae
try:
assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
if self.diam.size == coord_electrode.size:
assert(np.all(self.diam > 0 * self.diam.units))
except AssertionError as ae:
print('diam must be scalar or of same shape as coord_electrode')
raise ae
if self.diam.size == 1:
self.diam = np.ones(coord_electrode.size) * self.diam
self.name = 'spline-iCSD method'
self.coord_electrode = coord_electrode
# compute stuff
self.f_matrix = self.get_f_matrix()
def parameters(self, **kwargs):
'''Defining the default values of the method passed as kwargs
Parameters
----------
**kwargs
Same as those passed to initialize the Class
'''
self.diam = kwargs.pop('diam', 500E-6 * pq.m)
self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
self.tol = kwargs.pop('tol', 1e-6)
self.num_steps = kwargs.pop('num_steps', 200)
self.f_type = kwargs.pop('f_type', 'gaussian')
self.f_order = kwargs.pop('f_order', (3, 1))
if kwargs:
raise TypeError('Invalid keyword arguments:', kwargs.keys())
def get_f_matrix(self):
'''Calculate the F-matrix for cubic spline iCSD method'''
el_len = self.coord_electrode.size
z_js = np.zeros(el_len + 1)
z_js[:-1] = np.array(self.coord_electrode)
z_js[-1] = z_js[-2] + float(np.diff(self.coord_electrode).mean())
# Define integration matrices
f_mat0 = np.zeros((el_len, el_len + 1))
f_mat1 = np.zeros((el_len, el_len + 1))
f_mat2 = np.zeros((el_len, el_len + 1))
f_mat3 = np.zeros((el_len, el_len + 1))
# Calc. elements
for j in range(el_len):
for i in range(el_len):
f_mat0[j, i] = si.quad(self._f_mat0, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat1[j, i] = si.quad(self._f_mat1, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat2[j, i] = si.quad(self._f_mat2, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
f_mat3[j, i] = si.quad(self._f_mat3, a=z_js[i], b=z_js[i + 1],
args=(z_js[j + 1], z_js[i],
float(self.sigma),
float(self.diam[j])),
epsabs=self.tol)[0]
# image technique if conductivity not constant:
if self.sigma != self.sigma_top:
f_mat0[j, i] = f_mat0[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat0, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1],
float(self.sigma), float(self.diam[j])), \
epsabs=self.tol)[0]
f_mat1[j, i] = f_mat1[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat1, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat2[j, i] = f_mat2[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat2, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
f_mat3[j, i] = f_mat3[j, i] + (self.sigma-self.sigma_top) / \
(self.sigma + self.sigma_top) * \
si.quad(self._f_mat3, a=z_js[i], b=z_js[i+1], \
args=(-z_js[j+1], z_js[i], float(self.sigma),
float(self.diam[j])), epsabs=self.tol)[0]
e_mat0, e_mat1, e_mat2, e_mat3 = self._calc_e_matrices()
# Calculate the F-matrix
f_matrix = np.eye(el_len + 2)
f_matrix[1:-1, :] = np.dot(f_mat0, e_mat0) + \
np.dot(f_mat1, e_mat1) + \
np.dot(f_mat2, e_mat2) + \
np.dot(f_mat3, e_mat3)
return f_matrix * self.coord_electrode.units**2 / self.sigma.units
def get_csd(self):
'''
Calculate the iCSD using the spline iCSD method
Returns
-------
csd : np.ndarray * quantity.Quantity
Array with csd estimate
'''
e_mat = self._calc_e_matrices()
el_len = self.coord_electrode.size
# padding the lfp with zeros on top/bottom
if self.lfp.ndim == 1:
cs_lfp = np.r_[[0], np.asarray(self.lfp), [0]].reshape(1, -1).T
csd = np.zeros(self.num_steps)
else:
cs_lfp = np.vstack((np.zeros(self.lfp.shape[1]),
np.asarray(self.lfp),
np.zeros(self.lfp.shape[1])))
csd = np.zeros((self.num_steps, self.lfp.shape[1]))
cs_lfp *= self.lfp.units
# CSD coefficients
csd_coeff = np.linalg.solve(self.f_matrix, cs_lfp)
# The cubic spline polynomial coefficients
a_mat0 = np.dot(e_mat[0], csd_coeff)
a_mat1 = np.dot(e_mat[1], csd_coeff)
a_mat2 = np.dot(e_mat[2], csd_coeff)
a_mat3 = np.dot(e_mat[3], csd_coeff)
# Extend electrode coordinates in both end by min contact interdistance
h = np.diff(self.coord_electrode).min()
z_js = np.zeros(el_len + 2)
z_js[0] = self.coord_electrode[0] - h
z_js[1: -1] = self.coord_electrode
z_js[-1] = self.coord_electrode[-1] + h
# create high res spatial grid
out_zs = np.linspace(z_js[1], z_js[-2], self.num_steps)
# Calculate iCSD estimate on grid from polynomial coefficients.
i = 0
for j in range(self.num_steps):
if out_zs[j] >= z_js[i + 1]:
i += 1
csd[j, ] = a_mat0[i, :] + a_mat1[i, :] * \
(out_zs[j] - z_js[i]) + \
a_mat2[i, :] * (out_zs[j] - z_js[i])**2 + \
a_mat3[i, :] * (out_zs[j] - z_js[i])**3
csd_unit = (self.f_matrix.units**-1 * self.lfp.units).simplified
return csd * csd_unit
def _f_mat0(self, zeta, z_val, sigma, diam):
'''0'th order potential function'''
return 1. / (2. * sigma) * \
(np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
def _f_mat1(self, zeta, z_val, zi_val, sigma, diam):
'''1'th order potential function'''
return (zeta - zi_val) * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat2(self, zeta, z_val, zi_val, sigma, diam):
'''2'nd order potential function'''
return (zeta - zi_val)**2 * self._f_mat0(zeta, z_val, sigma, diam)
def _f_mat3(self, zeta, z_val, zi_val, sigma, diam):
'''3'rd order potential function'''
return (zeta - zi_val)**3 * self._f_mat0(zeta, z_val, sigma, diam)
def _calc_k_matrix(self):
'''Calculate the K-matrix used to calculate the E-matrices'''
el_len = self.coord_electrode.size
h = float(np.diff(self.coord_electrode).min())
c_jm1 = np.eye(el_len + 2, k=0) / h
c_jm1[0, 0] = 0
c_j0 = np.eye(el_len + 2) / h
c_j0[-1, -1] = 0
c_jall = c_j0
c_jall[0, 0] = 1
c_jall[-1, -1] = 1
tjp1 = np.eye(el_len + 2, k=1)
tjm1 = np.eye(el_len + 2, k=-1)
tj0 = np.eye(el_len + 2)
tj0[0, 0] = 0
tj0[-1, -1] = 0
# Defining K-matrix used to calculate e_mat1-3
return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +
2 * np.dot(c_jm1, tj0) +
2 * c_jall +
np.dot(c_j0, tjp1)),
3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -
np.dot(np.dot(c_jm1, c_jm1), tjm1) +
np.dot(np.dot(c_j0, c_j0), tjp1) -
np.dot(np.dot(c_j0, c_j0), tj0)))
def _calc_e_matrices(self):
'''Calculate the E-matrices used by cubic spline iCSD method'''
el_len = self.coord_electrode.size
# expanding electrode grid
h = float(np.diff(self.coord_electrode).min())
# Define transformation matrices
c_mat3 = np.eye(el_len + 1) / h
# Get K-matrix
k_matrix = self._calc_k_matrix()
# Define matrices for C to A transformation:
tja = np.eye(el_len + 2)[:-1, ]
tjp1a = np.eye(el_len + 2, k=1)[:-1, ]
# Define spline coefficients
e_mat0 = tja
e_mat1 = np.dot(tja, k_matrix)
e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \
np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)
e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \
np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)
return e_mat0, e_mat1, e_mat2, e_mat3
if __name__ == '__main__':
from scipy.io import loadmat
import matplotlib.pyplot as plt
#loading test data
test_data = loadmat('test_data.mat')
#prepare lfp data for use, by changing the units to SI and appending
#quantities, along with electrode geometry, conductivities and assumed
#source geometry
lfp_data = test_data['pot1'] * 1E-6 * pq.V # [uV] -> [V]
z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m # [m]
diam = 500E-6 * pq.m # [m]
h = 100E-6 * pq.m # [m]
sigma = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
sigma_top = 0.3 * pq.S / pq.m # [S/m] or [1/(ohm*m)]
# Input dictionaries for each method
delta_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam, # source diameter
'sigma' : sigma, # extracellular conductivity
'sigma_top' : sigma, # conductivity on top of cortex
'f_type' : 'gaussian', # gaussian filter
'f_order' : (3, 1), # 3-point filter, sigma = 1.
}
step_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'h' : h, # source thickness
'sigma' : sigma,
'sigma_top' : sigma,
'tol' : 1E-12, # Tolerance in numerical integration
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
spline_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'diam' : diam,
'sigma' : sigma,
'sigma_top' : sigma,
'num_steps' : 201, # Spatial CSD upsampling to N steps
'tol' : 1E-12,
'f_type' : 'gaussian',
'f_order' : (20, 5),
}
std_input = {
'lfp' : lfp_data,
'coord_electrode' : z_data,
'sigma' : sigma,
'f_type' : 'gaussian',
'f_order' : (3, 1),
}
#Create the different CSD-method class instances. We use the class methods
#get_csd() and filter_csd() below to get the raw and spatially filtered
#versions of the current-source density estimates.
csd_dict = dict(
delta_icsd = DeltaiCSD(**delta_input),
step_icsd = StepiCSD(**step_input),
spline_icsd = SplineiCSD(**spline_input),
std_csd = StandardCSD(**std_input),
)
#plot
for method, csd_obj in list(csd_dict.items()):
fig, axes = plt.subplots(3,1, figsize=(8,8))
#plot LFP signal
ax = axes[0]
im = ax.imshow(np.array(lfp_data), origin='upper', vmin=-abs(lfp_data).max(), \
vmax=abs(lfp_data).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
cb = plt.colorbar(im, ax=ax)
cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
ax.set_xticklabels([])
ax.set_title('LFP')
ax.set_ylabel('ch #')
#plot raw csd estimate
csd = csd_obj.get_csd()
ax = axes[1]
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name)
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_xticklabels([])
ax.set_ylabel('ch #')
#plot spatially filtered csd estimate
ax = axes[2]
csd = csd_obj.filter_csd(csd)
im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
ax.axis(ax.axis('tight'))
ax.set_title(csd_obj.name + ', filtered')
cb = plt.colorbar(im, ax=ax)
cb.set_label('CSD (%s)' % csd.dimensionality.string)
ax.set_ylabel('ch #')
ax.set_xlabel('timestep')
plt.show()
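    # Optional post-processing sketch (added for illustration; not part of
    # the original demo): the `quantities` package can rescale the CSD
    # estimate to more conventional units, e.g. uA/mm**3, before further
    # analysis.
    csd_est = csd_dict['spline_icsd'].get_csd()
    csd_uA_mm3 = csd_est.rescale(pq.uA / pq.mm**3)
    print('spline iCSD range: %g to %g %s' % (float(csd_uA_mm3.min()),
                                              float(csd_uA_mm3.max()),
                                              csd_uA_mm3.dimensionality.string))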
| bsd-3-clause |
mgahsan/QuantEcon.py | quantecon/models/solow/model.py | 7 | 38654 | r"""
======================
The Solow Growth Model
======================
The following summary of the [solow1956] model of economic growth
largely follows [romer2011].
Assumptions
===========
The production function
----------------------------------------------
The [solow1956] model of economic growth focuses on the behavior of four
variables: output, `Y`, capital, `K`, labor, `L`, and knowledge (or
technology or the "effectiveness of labor"), `A`. At each point in
time the economy has some amounts of capital, labor, and knowledge that
can be combined to produce output according to some production function,
`F`.
.. math::
Y(t) = F(K(t), A(t)L(t))
where `t` denotes time.
The evolution of the inputs to production
-----------------------------------------
The initial levels of capital, :math:`K_0`, labor, :math:`L_0`, and
technology, :math:`A_0`, are taken as given. Labor and technology are
assumed to grow at constant rates:
.. math::
\dot{A}(t) = gA(t)
\dot{L}(t) = nL(t)
where the rate of technological progress, `g`, and the population growth
rate, `n`, are exogenous parameters.
Output is divided between consumption and investment. The fraction of
output devoted to investment, :math:`0 < s < 1`, is exogenous and
constant. One unit of output devoted to investment yields one unit of
new capital. Capital is assumed to depreciate at a rate :math:`0\le
\delta`. Thus aggregate capital stock evolves according to
.. math::
\dot{K}(t) = sY(t) - \delta K(t).
Although no restrictions are placed on the rates of technological
progress and population growth, the sum of `g`, `n`, and :math:`\delta`
is assumed to be positive.
The dynamics of the model
=========================
Because the economy is growing over time (due to exogenous technological
progress and population growth) it is useful to focus on the behavior of
capital stock per unit of effective labor, :math:`k\equiv K/AL`.
Applying the chain rule to the equation of motion for capital stock
yields (after a bit of algebra!) an equation of motion for capital stock
per unit of effective labor.
.. math::
\dot{k}(t) = s f(k) - (g + n + \delta)k(t)
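For example, with Cobb-Douglas production, :math:`f(k) = k^{\alpha}` with
:math:`0 < \alpha < 1`, setting :math:`\dot{k}(t) = 0` yields the
closed-form steady state
.. math::
k^* = \bigg(\frac{s}{g + n + \delta}\bigg)^{\frac{1}{1-\alpha}}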
References
==========
.. [romer2011] D. Romer. *Advanced Macroeconomics, 4th edition*, MacGraw Hill, 2011.
.. [solow1956] R. Solow. *A contribution to the theory of economic growth*, Quarterly Journal of Economics, 70(1):64-95, 1956.
@author : David R. Pugh
@date : 2014-11-27
"""
from __future__ import division
import collections
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize
import sympy as sym
from ... import ivp
from . import impulse_response
# declare key variables for the model
t, X = sym.symbols('t'), sym.DeferredVector('X')
A, k, K, L, Y = sym.symbols('A, k, K, L, Y')
# declare required model parameters
g, n, s, delta = sym.symbols('g, n, s, delta')
class Model(object):
__intensive_output = None
__mpk = None
__numeric_jacobian = None
__numeric_solow_residual = None
__numeric_system = None
_modules = [{'ImmutableMatrix': np.array}, "numpy"]
_required_params = ['g', 'n', 's', 'delta', 'A0', 'L0']
def __init__(self, output, params):
"""
Create an instance of the Solow growth model.
Parameters
----------
output : sym.Basic
Symbolic expression defining the aggregate production
function.
params : dict
Dictionary of model parameters.
"""
self.irf = impulse_response.ImpulseResponse(self)
self.output = output
self.params = params
def __repr__(self):
"""Machine readable summary of a Model instance."""
return self.__str__()
def __str__(self):
"""Human readable summary of a Model instance."""
m = """
Solow (1956) model of economic growth:
- Output : {Y}
- A0 (initial level of technology) : {A0:g}
- L0 (initial amount of available labor) : {L0:g}
- g (growth rate of technology) : {g:g}
- n (growth rate of the labor force) : {n:g}
- s (savings rate) : {s:g}
- delta (depreciation rate of physical capital) : {delta:g}
"""
formatted_str = dedent(m.format(Y=self.output,
A0=self.params['A0'],
L0=self.params['L0'],
g=self.params['g'],
n=self.params['n'],
s=self.params['s'],
delta=self.params['delta']))
return formatted_str
@property
def _intensive_output(self):
"""
:getter: Return vectorized symbolic intensive aggregate production.
:type: function
"""
if self.__intensive_output is None:
args = [k] + sym.symbols(list(self.params.keys()))
self.__intensive_output = sym.lambdify(args, self.intensive_output,
self._modules)
return self.__intensive_output
@property
def _mpk(self):
"""
:getter: Return vectorized symbolic marginal product capital.
:type: function
"""
if self.__mpk is None:
args = [k] + sym.symbols(list(self.params.keys()))
self.__mpk = sym.lambdify(args, self.marginal_product_capital,
self._modules)
return self.__mpk
@property
def _numeric_jacobian(self):
"""
Vectorized, numpy-aware function defining the Jacobian matrix of
partial derivatives.
:getter: Return vectorized Jacobian matrix of partial derivatives.
:type: function
"""
if self.__numeric_jacobian is None:
self.__numeric_jacobian = sym.lambdify(self._symbolic_args,
self._symbolic_jacobian,
self._modules)
return self.__numeric_jacobian
@property
def _numeric_solow_residual(self):
"""
Vectorized, numpy-aware function defining the Solow residual.
:getter: Return vectorized symbolic Solow residual.
:type: function
"""
if self.__numeric_solow_residual is None:
tmp_args = [Y, K, L] + sym.symbols(list(self.params.keys()))
self.__numeric_solow_residual = sym.lambdify(tmp_args,
self.solow_residual,
self._modules)
return self.__numeric_solow_residual
@property
def _numeric_system(self):
"""
Vectorized, numpy-aware function defining the system of ODEs.
:getter: Return vectorized symbolic system of ODEs.
:type: function
"""
if self.__numeric_system is None:
self.__numeric_system = sym.lambdify(self._symbolic_args,
self._symbolic_system,
self._modules)
return self.__numeric_system
@property
def _symbolic_args(self):
"""
List of symbolic arguments used in constructing vectorized
versions of _symbolic_system and _symbolic_jacobian.
:getter: Return list of symbolic arguments.
:type: list
"""
args = [t, X] + sym.symbols(list(self.params.keys()))
return args
@property
def _symbolic_jacobian(self):
"""
Symbolic Jacobian matrix for the system of ODEs.
:getter: Return the symbolic Jacobian matrix.
:type: sym.MutableDenseMatrix
"""
N = self._symbolic_system.shape[0]
return self._symbolic_system.jacobian([X[i] for i in range(N)])
@property
def _symbolic_system(self):
"""
Symbolic matrix defining the system of ODEs.
:getter: Return the matrix defining the system of ODEs.
:type: sym.MutableDenseMatrix
"""
change_of_vars = {k: X[0]}
return sym.Matrix([self.k_dot]).subs(change_of_vars)
@property
def effective_depreciation_rate(self):
"""
Effective depreciation rate for capital stock (per unit
effective labor).
:getter: Return the current effective depreciation rate.
:type: float
Notes
-----
The effective depreciation rate of physical capital takes into
account both technological progress and population growth, as
well as physical depreciation.
"""
return sum(self.params[key] for key in ['g', 'n', 'delta'])
@property
def intensive_output(self):
r"""
Symbolic expression for the intensive form of aggregate
production.
:getter: Return the current intensive production function.
:type: sym.Basic
Notes
-----
The assumption of constant returns to scale allows us to work
with the intensive form of the aggregate production function,
`F`. Setting :math:`c=1/AL` one can write
.. math::
F\bigg(\frac{K}{AL}, 1\bigg) = \frac{1}{AL}F(K, AL)
Defining :math:`k=K/AL` and :math:`y=Y/AL` to be capital per
unit effective labor and output per unit effective labor,
respectively, the intensive form of the production function can
be written as
.. math::
y = f(k).
Additional assumptions are that `f` satisfies :math:`f(0)=0`, is
concave (i.e., :math:`f'(k) > 0, f''(k) < 0`), and satisfies the
Inada conditions:
.. math::
:type: eqnarray
\lim_{k \rightarrow 0} f'(k) &=& \infty \\
\lim_{k \rightarrow \infty} f'(k) &=& 0
The [inada1964]_ conditions are sufficient (but not necessary!)
to ensure that the time path of capital per effective worker
does not explode.
.. [inada1964] K. Inada. *Some structural characteristics of Turnpike Theorems*, Review of Economic Studies, 31(1):43-58, 1964.
"""
return self.output.subs({'A': 1.0, 'K': k, 'L': 1.0})
@property
def ivp(self):
r"""
Initial value problem
:getter: Return an instance of the ivp.IVP class representing
the Solow model.
:type: ivp.IVP
Notes
-----
The Solow model can be formulated as an initial value
problem (IVP) as follows.
.. math::
\dot{k}(t) = sf(k(t)) - (g + n + \delta)k(t),\ t\ge t_0,\ k(t_0) = k_0
The solution to this IVP is a function :math:`k(t)` describing
the time path of capital stock (per unit effective labor).
"""
tmp_ivp = ivp.IVP(self._numeric_system, self._numeric_jacobian)
tmp_ivp.f_params = tuple(self.params.values())
tmp_ivp.jac_params = tuple(self.params.values())
return tmp_ivp
@property
def k_dot(self):
r"""
Symbolic expression for the equation of motion for capital (per
unit effective labor).
:getter: Return the current equation of motion for capital (per
unit effective labor).
:type: sym.Basic
Notes
-----
Because the economy is growing over time due to technological
progress, `g`, and population growth, `n`, it makes sense to
focus on the capital stock per unit effective labor, `k`, rather
than aggregate physical capital, `K`. Since, by definition,
:math:`k=K/AL`, we can apply the chain rule to the time derivative
of `k`.
.. math::
:type: eqnarray
\dot{k}(t) =& \frac{\dot{K}(t)}{A(t)L(t)} - \frac{K(t)}{[A(t)L(t)]^2}\bigg[\dot{A}(t)L(t) + \dot{L}(t)A(t)\bigg] \\
=& \frac{\dot{K}(t)}{A(t)L(t)} - \bigg(\frac{\dot{A}(t)}{A(t)} + \frac{\dot{L}(t)}{L(t)}\bigg)\frac{K(t)}{A(t)L(t)}
By definition, :math:`k=K/AL`, and by assumption
:math:`\dot{A}/A` and :math:`\dot{L}/L` are `g` and `n`
respectively. Aggregate capital stock evolves according to
.. math::
\dot{K}(t) = sF(K(t), A(t)L(t)) - \delta K(t).
Substituting these facts into the above equation yields the
equation of motion for capital stock (per unit effective labor).
.. math::
:type: eqnarray
\dot{k}(t) =& \frac{sF(K(t), A(t)L(t)) - \delta K(t)}{A(t)L(t)} - (g + n)k(t) \\
=& \frac{sY(t)}{A(t)L(t)} - (g + n + \delta)k(t) \\
=& sf(k(t)) - (g + n + \delta)k(t)
"""
return s * self.intensive_output - (g + n + delta) * k
@property
def marginal_product_capital(self):
r"""
Symbolic expression for the marginal product of capital (per
unit effective labor).
:getter: Return the current marginal product of capital (per
unit effective labor).
:type: sym.Basic
Notes
-----
The marginal product of capital is defined as follows:
.. math::
\frac{\partial F(K, AL)}{\partial K} \equiv f'(k)
where :math:`k=K/AL` is capital stock (per unit effective labor)
"""
return sym.diff(self.intensive_output, k)
@property
def output(self):
r"""
Symbolic expression for the aggregate production function.
:getter: Return the current aggregate production function.
:setter: Set a new aggregate production function
:type: sym.Basic
Notes
-----
At each point in time the economy has some amounts of capital,
`K`, labor, `L`, and knowledge (or technology), `A`, that can be
combined to produce output, `Y`, according to some function,
`F`.
.. math::
Y(t) = F(K(t), A(t)L(t))
where `t` denotes time. Note that `A` and `L` are assumed to
enter multiplicatively. Typically `A(t)L(t)` denotes "effective
labor", and technology that enters in this fashion is known as
labor-augmenting or "Harrod neutral."
A key assumption of the model is that the function `F` exhibits
constant returns to scale in capital and labor inputs.
Specifically,
.. math::
F(cK(t), cA(t)L(t)) = cF(K(t), A(t)L(t)) = cY(t)
for any :math:`c \ge 0`.
"""
return self._output
@property
def params(self):
"""
Dictionary of model parameters.
:getter: Return the current dictionary of model parameters.
:setter: Set a new dictionary of model parameters.
:type: dict
Notes
-----
The following parameters are required:
A0: float
Initial level of technology. Must satisfy :math:`A_0 > 0`.
L0: float
Initial amount of available labor. Must satisfy
:math:`L_0 > 0`.
g : float
Growth rate of technology.
n : float
Growth rate of the labor force.
s : float
Savings rate. Must satisfy `0 < s < 1`.
delta : float
Depreciation rate of physical capital. Must satisfy
:math:`0 < \delta`.
Although no restrictions are placed on the rates of
technological progress and population growth, the sum of `g`,
`n`, and :math:`\delta` is assumed to be positive. The user must
also specify any additional model parameters specific to the
chosen aggregate production function.
"""
return self._params
@property
def solow_residual(self):
"""
Symbolic expression for the Solow residual which is used as a
measure of technology.
:getter: Return the symbolic expression.
:type: sym.Basic
"""
return sym.solve(Y - self.output, A)[0]
@property
def speed_of_convergence(self):
r"""
The speed of convergence for the Solow model.
:getter: Return the current speed of convergence.
:type: float
Notes
-----
The following is a derivation for the speed of convergence
:math:`\lambda`:
.. math::
:type: eqnarray
\lambda \equiv -\frac{\partial \dot{k}(k(t))}{\partial k(t)}\bigg|_{k(t)=k^*} =& -[sf'(k^*) - (g + n+ \delta)] \\
=& (g + n+ \delta) - sf'(k^*) \\
=& (g + n + \delta) - (g + n + \delta)\frac{k^*f'(k^*)}{f(k^*)} \\
=& (1 - \alpha_K(k^*))(g + n + \delta)
where the elasticity of output with respect to capital,
:math:`\alpha_K(k)`, is defined as
.. math::
\alpha_K(k) = \frac{kf'(k)}{f(k)}.
"""
alpha_K = self.evaluate_output_elasticity(self.steady_state)
return (1 - alpha_K) * self.effective_depreciation_rate
@property
def steady_state(self):
r"""
Steady state value of capital stock (per unit effective labor).
:getter: Return the current steady state value.
:type: float
Notes
-----
The steady state value of capital stock (per unit effective
labor), `k`, is defined as the value of `k` that solves
.. math::
0 = sf(k) - (g + n + \delta)k
where `s` is the savings rate, `f(k)` is intensive output, and
:math:`g + n + \delta` is the effective depreciation rate.
"""
lower, upper = 1e-12, 1e12
return self.find_steady_state(lower, upper)
@output.setter
def output(self, value):
"""Set a new production function."""
self._output = self._validate_output(value)
self._clear_cache()
@params.setter
def params(self, value):
"""Set a new parameter dictionary."""
valid_params = self._validate_params(value)
self._params = self._order_params(valid_params)
def _clear_cache(self):
"""Clear cached values."""
self.__intensive_output = None
self.__mpk = None
self.__numeric_jacobian = None
self.__numeric_solow_residual = None
self.__numeric_system = None
@staticmethod
def _order_params(params):
"""Cast a dictionary to an order dictionary."""
return collections.OrderedDict(sorted(params.items()))
def _validate_output(self, output):
"""Validate the production function."""
if not isinstance(output, sym.Basic):
mesg = ("Output must be an instance of {}.".format(sym.Basic))
raise AttributeError(mesg)
elif not ({A, K, L} < output.atoms()):
mesg = ("Output must be an expression of technology, 'A', " +
"capital, 'K', and labor, 'L'.")
raise AttributeError(mesg)
else:
return output
def _validate_params(self, params):
"""Validate the model parameters."""
if not isinstance(params, dict):
mesg = "SolowModel.params must be a dict, not a {}."
raise AttributeError(mesg.format(params.__class__))
elif not set(self._required_params) <= set(params.keys()):
mesg = "One of the required params in {} has not been specified."
raise AttributeError(mesg.format(self._required_params))
elif params['s'] <= 0.0 or params['s'] >= 1.0:
raise AttributeError('Savings rate must be in (0, 1).')
elif params['delta'] <= 0.0 or params['delta'] >= 1.0:
raise AttributeError('Depreciation rate must be in (0, 1).')
elif params['g'] + params['n'] + params['delta'] <= 0.0:
raise AttributeError("Sum of g, n, and delta must be positive.")
elif params['A0'] <= 0.0:
mesg = "Initial value for technology must be strictly positive."
raise AttributeError(mesg)
elif params['L0'] <= 0.0:
mesg = "Initial value for labor supply must be strictly positive."
raise AttributeError(mesg)
else:
return params
def evaluate_actual_investment(self, k):
"""
Return the amount of output (per unit of effective labor)
invested in the production of new capital.
Parameters
----------
k : array_like (float)
Capital stock (per unit of effective labor)
Returns
-------
actual_inv : array_like (float)
Investment (per unit of effective labor)
"""
actual_inv = self.params['s'] * self.evaluate_intensive_output(k)
return actual_inv
def evaluate_consumption(self, k):
"""
Return the amount of consumption (per unit of effective labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
c : ndarray (float)
Consumption (per unit of effective labor)
"""
c = (self.evaluate_intensive_output(k) -
self.evaluate_actual_investment(k))
return c
def evaluate_effective_depreciation(self, k):
"""
Return the amount of capital stock (per unit of effective labor)
that depreciates due to technological progress, population
growth, and physical depreciation.
Parameters
----------
k : array_like (float)
Capital stock (per unit of effective labor)
Returns
-------
effective_depreciation : array_like (float)
Amount of depreciated capital stock (per unit of effective
labor)
"""
effective_depreciation = self.effective_depreciation_rate * k
return effective_depreciation
def evaluate_intensive_output(self, k):
"""
Return the amount of output (per unit of effective labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
y : ndarray (float)
Output (per unit of effective labor)
"""
y = self._intensive_output(k, *self.params.values())
return y
def evaluate_k_dot(self, k):
"""
Return time derivative of capital stock (per unit of effective
labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
k_dot : ndarray (float)
Time derivative of capital stock (per unit of effective
labor).
"""
k_dot = (self.evaluate_actual_investment(k) -
self.evaluate_effective_depreciation(k))
return k_dot
def evaluate_mpk(self, k):
"""
Return marginal product of capital stock (per unit of effective
labor).
Parameters
----------
k : ndarray (float)
Capital stock (per unit of effective labor)
Returns
-------
mpk : ndarray (float)
Marginal product of capital stock (per unit of effective
labor).
"""
mpk = self._mpk(k, *self.params.values())
return mpk
def evaluate_output_elasticity(self, k):
"""
Return elasticity of output with respect to capital stock (per
unit effective labor).
Parameters
----------
k : array_like (float)
Capital stock (per unit of effective labor)
Returns
-------
alpha_k : array_like (float)
Elasticity of output with respect to capital stock (per unit
effective labor).
Notes
-----
Under the additional assumption that markets are perfectly
competitive, the elasticity of output with respect to capital
stock is equivalent to capital's share of income. Since, under
perfect competition, firms earn zero profits, it must be true that
capital's share and labor's share sum to one.
"""
alpha_k = (k*self.evaluate_mpk(k)) / self.evaluate_intensive_output(k)
return alpha_k
def evaluate_solow_residual(self, Y, K, L):
"""
Return Solow residual.
Parameters
----------
Y : array_like (float)
Output
K : array_like (float)
Capital stock
L : array_like (float)
Labor
Returns
-------
residual : array_like (float)
Solow residual
"""
residual = self._numeric_solow_residual(Y, K, L, *self.params.values())
assert (residual > 0).all(), "Solow residual should always be positive!"
return residual
def find_steady_state(self, a, b, method='brentq', **kwargs):
"""
Compute the equilibrium value of capital stock (per unit
effective labor).
Parameters
----------
a : float
One end of the bracketing interval [a,b].
b : float
The other end of the bracketing interval [a,b].
method : str (default=`brentq`)
Method to use when computing the steady state. Supported
methods are `bisect`, `brenth`, `brentq`, `ridder`. See
`scipy.optimize` for more details (including references).
kwargs : optional
Additional keyword arguments. Keyword arguments are method
specific see `scipy.optimize` for details.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In
particular, ``r.converged`` is True if the routine
converged.
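Examples
--------
A usage sketch (illustrative; assumes ``model`` is a Model instance
whose production function and parameters have been set):
>>> k_star = model.find_steady_state(1e-12, 1e12, method='brentq')  # doctest: +SKIP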
"""
if method == 'bisect':
result = optimize.bisect(self.evaluate_k_dot, a, b, **kwargs)
elif method == 'brenth':
result = optimize.brenth(self.evaluate_k_dot, a, b, **kwargs)
elif method == 'brentq':
result = optimize.brentq(self.evaluate_k_dot, a, b, **kwargs)
elif method == 'ridder':
result = optimize.ridder(self.evaluate_k_dot, a, b, **kwargs)
else:
mesg = ("Method must be one of : 'bisect', 'brenth', 'brentq', " +
"or 'ridder'.")
raise ValueError(mesg)
return result
def linearized_solution(self, t, k0):
"""
Compute the linearized solution for the Solow model.
Parameters
----------
t : ndarray (shape=(T,))
Array of points at which the solution is desired.
k0 : (float)
Initial condition for capital stock (per unit of effective
labor)
Returns
-------
linearized_traj : ndarray (shape=(t.size, 2))
Array representing the linearized solution trajectory.
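Notes
-----
This returns the solution of the model linearized around the steady
state, :math:`k(t) = k^* + e^{-\lambda t}(k_0 - k^*)`, where the rate
of convergence :math:`\lambda` is given by the
``speed_of_convergence`` property.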
"""
kt = (self.steady_state + np.exp(-self.speed_of_convergence * t) *
(k0 - self.steady_state))
linearized_traj = np.hstack((t[:, np.newaxis], kt[:, np.newaxis]))
return linearized_traj
def plot_factor_shares(self, ax, Nk=1e3, **new_params):
"""
Plot income/output shares of capital and labor inputs to
production.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
capitals_share_line : matplotlib.lines.Line2D
A Line2D object representing the time path for capital's
share of income.
labors_share_line : matplotlib.lines.Line2D
A Line2D object representing the time path for labor's
share of income.
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
capitals_share = self.evaluate_output_elasticity(k_grid)
labors_share = 1 - capitals_share
capitals_share_line, = ax.plot(k_grid, capitals_share, 'r-',
label=r'$\alpha_K(k(t))$')
labors_share_line, = ax.plot(k_grid, labors_share, 'b-',
label=r'$1 - \alpha_K(k(t))$')
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_title('Factor shares', family='serif', fontsize=20)
ax.grid(True)
ax.legend(loc=0, frameon=False, prop={'family': 'serif'},
bbox_to_anchor=(1.0, 1.0))
return [capitals_share_line, labors_share_line]
def plot_intensive_output(self, ax, Nk=1e3, **new_params):
"""
Plot intensive form of the aggregate production function.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
intensive_output : matplotlib.lines.Line2D
A Line2D object representing intensive output as a function
of capital stock (per unit effective labor).
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
y_grid = self.evaluate_intensive_output(k_grid)
intensive_output_line, = ax.plot(k_grid, y_grid, 'r-')
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_ylabel('$f(k(t))$', family='serif', fontsize=20,
rotation='horizontal')
ax.yaxis.set_label_coords(-0.1, 0.5)
ax.set_title('Output (per unit effective labor)',
family='serif', fontsize=20)
ax.grid(True)
return [intensive_output_line]
def plot_intensive_investment(self, ax, Nk=1e3, **new_params):
"""
Plot actual investment (per unit effective labor) and effective
depreciation. The steady state value of capital stock (per unit
effective labor) balances actual investment and effective
depreciation.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
actual_investment_line : matplotlib.lines.Line2D
A Line2D object representing the level of actual investment
as a function of capital stock (per unit effective labor).
breakeven_investment_line : matplotlib.lines.Line2D
A Line2D object representing the "break-even" level of
investment as a function of capital stock (per unit
effective labor).
ss_line : matplotlib.lines.Line2D
A Line2D object representing the steady state level of
investment.
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
actual_investment_grid = self.evaluate_actual_investment(k_grid)
breakeven_investment_grid = self.evaluate_effective_depreciation(k_grid)
ss_investment = self.evaluate_actual_investment(self.steady_state)
actual_investment_line, = ax.plot(k_grid, actual_investment_grid, 'g-',
label='$sf(k(t))$')
breakeven_investment_line, = ax.plot(k_grid, breakeven_investment_grid,
'b-', label=r'$(g + n + \delta)k(t)$')
ss_line, = ax.plot(self.steady_state, ss_investment, 'ko',
label='$k^*={0:.4f}$'.format(self.steady_state))
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_ylabel('Investment (per unit effective labor)', family='serif',
fontsize=15)
ax.set_title('Investment (per unit effective labor)',
family='serif', fontsize=20)
ax.grid(True)
ax.legend(loc=0, frameon=False, prop={'family': 'serif'},
bbox_to_anchor=(1.0, 1.0))
return [actual_investment_line, breakeven_investment_line, ss_line]
def plot_phase_diagram(self, ax, Nk=1e3, **new_params):
"""
Plot the model's phase diagram.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
k_dot_line : matplotlib.lines.Line2D
A Line2D object representing the rate of change of capital
stock (per unit effective labor) as a function of its level.
origin_line : matplotlib.lines.Line2D
A Line2D object representing the origin (i.e., locus of
points where k_dot is zero).
ss_line : matplotlib.lines.Line2D
A Line2D object representing the steady state level of
capital stock (per unit effective labor).
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
k_dot_line, = ax.plot(k_grid, self.evaluate_k_dot(k_grid),
color='orange')
origin_line = ax.axhline(0, color='k')
ss_line, = ax.plot(self.steady_state, 0.0, 'ko',
label='$k^*={0:.4f}$'.format(self.steady_state))
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_ylabel(r'$\dot{k}(t)$', family='serif', fontsize=25,
rotation='horizontal')
ax.yaxis.set_label_coords(-0.1, 0.5)
ax.set_title('Phase diagram', family='serif', fontsize=20)
ax.grid(True)
return [k_dot_line, origin_line, ss_line]
def plot_solow_diagram(self, ax, Nk=1e3, **new_params):
"""
Plot the classic Solow diagram.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
Nk : float (default=1e3)
Number of capital stock (per unit of effective labor) grid
points.
new_params : dict (optional)
Optional dictionary of parameter values to change.
Returns
-------
A list containing...
actual_investment_line : matplotlib.lines.Line2D
A Line2D object representing the level of actual investment
as a function of capital stock (per unit effective labor).
breakeven_investment_line : matplotlib.lines.Line2D
A Line2D object representing the "break-even" level of
investment as a function of capital stock (per unit
effective labor).
ss_line : matplotlib.lines.Line2D
A Line2D object representing the steady state level of
investment.
"""
# create tmp_params dict to force check for valid params
tmp_params = self.params.copy()
tmp_params.update(new_params)
self.params = tmp_params # forces check for valid params!
# create the plot
k_grid = np.linspace(0, 2 * self.steady_state, Nk)
intensive_output_grid = self.evaluate_intensive_output(k_grid)
actual_investment_grid = self.evaluate_actual_investment(k_grid)
breakeven_investment_grid = self.evaluate_effective_depreciation(k_grid)
ss_investment = self.evaluate_actual_investment(self.steady_state)
intensive_output_line, = ax.plot(k_grid, intensive_output_grid, 'r-',
label='$f(k(t))$')
actual_investment_line, = ax.plot(k_grid, actual_investment_grid, 'g-',
label='$sf(k(t))$')
breakeven_investment_line, = ax.plot(k_grid, breakeven_investment_grid,
'b-', label=r'$(g + n + \delta)k(t)$')
ss_line, = ax.plot(self.steady_state, ss_investment, 'ko',
label='$k^*={0:.4f}$'.format(self.steady_state))
ax.set_xlabel('Capital (per unit effective labor), $k(t)$',
family='serif', fontsize=15)
ax.set_title('Solow diagram',
family='serif', fontsize=20)
ax.grid(True)
ax.legend(loc=0, frameon=False, prop={'family': 'serif'},
bbox_to_anchor=(1, 1))
lines = [intensive_output_line, actual_investment_line,
breakeven_investment_line, ss_line]
return lines
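if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module). Because of the relative imports above, this block only runs
    # when the package is importable, e.g. via
    # ``python -m quantecon.models.solow.model``. All parameter values below
    # are purely illustrative.
    alpha = sym.symbols('alpha')
    cobb_douglas = K**alpha * (A * L)**(1 - alpha)
    example_params = {'A0': 1.0, 'L0': 1.0, 'g': 0.02, 'n': 0.01,
                      's': 0.15, 'delta': 0.04, 'alpha': 0.33}
    example_model = Model(output=cobb_douglas, params=example_params)
    # Analytically, k* = (s / (g + n + delta))**(1 / (1 - alpha)) ~= 3.12
    print(example_model.steady_state)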
| bsd-3-clause |
pysb/pysb | pysb/simulator/base.py | 5 | 57380 | from abc import ABCMeta, abstractmethod
import numpy as np
import itertools
import sympy
import collections
from collections.abc import Mapping, Sequence
import numbers
from pysb.core import MonomerPattern, ComplexPattern, as_complex_pattern, \
Parameter, Expression, Model, ComponentSet
from pysb.logging import get_logger, EXTENDED_DEBUG
import pickle
from pysb.export.json import JsonExporter
from pysb.importers.json import model_from_json
from pysb import __version__ as PYSB_VERSION
from datetime import datetime
import dateutil.parser
import copy
from warnings import warn
from pysb.pattern import SpeciesPatternMatcher
from contextlib import contextmanager
import weakref
try:
import pandas as pd
except ImportError:
pd = None
try:
import h5py
except ImportError:
h5py = None
class SimulatorException(Exception):
pass
class InconsistentParameterError(SimulatorException, ValueError):
def __init__(self, parameter_name, value, reason):
super(InconsistentParameterError, self).__init__(
f'Value {value} that was passed for parameter {parameter_name} '
f"was inconsistent with that parameter's assumptions: {reason}"
)
class Simulator(object):
"""An abstract base class for numerical simulation of models.
.. warning::
The interface for this class is considered experimental and may
change without warning as PySB is updated.
Parameters
----------
model : pysb.Model
Model to simulate.
tspan : vector-like, optional
Time values over which to simulate. The first and last values define
the time range. Returned trajectories are sampled at every value unless
the simulation is interrupted for some reason, e.g., due to
satisfaction
of a logical stopping criterion (see 'tout' below).
initials : vector-like or dict, optional
Values to use for the initial condition of all species. Ordering is
determined by the order of model.species. If not specified, initial
conditions will be taken from model.initials (with initial condition
parameter values taken from `param_values` if specified).
param_values : vector-like or dict, optional
Values to use for every parameter in the model. Ordering is
determined by the order of model.parameters.
If passed as a dictionary, keys must be parameter names.
If not specified, parameter values will be taken directly from
model.parameters.
verbose : bool or int, optional (default: False)
Sets the verbosity level of the logger. See the logging levels and
constants from Python's logging module for interpretation of integer
values. False is equal to the PySB default level (currently WARNING),
True is equal to DEBUG.
Attributes
----------
verbose: bool
Verbosity flag passed to the constructor.
model : pysb.Model
Model passed to the constructor.
tspan : vector-like
Time values passed to the constructor.
Notes
-----
If ``tspan`` is not defined, it may be defined in the call to the
``run`` method.
The dimensionality of ``tout`` depends on whether a single simulation
or multiple simulations are run.
The dimensionalities of ``y``, ``yobs``, ``yobs_view``, ``yexpr``, and
``yexpr_view`` depend on the number of simulations being run as well
as on the type of simulation, i.e., spatial vs. non-spatial.
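Examples
--------
A typical usage sketch with a concrete subclass (illustrative; see
:class:`SimulationResult` for fuller examples):
>>> from pysb.examples.robertson import model  # doctest: +SKIP
>>> from pysb.simulator import ScipyOdeSimulator  # doctest: +SKIP
>>> sim = ScipyOdeSimulator(model, tspan=np.linspace(0, 40, 10))  # doctest: +SKIP
>>> result = sim.run()  # doctest: +SKIP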
"""
__metaclass__ = ABCMeta
_supports = { 'multi_initials' : False,
'multi_param_values' : False }
@abstractmethod
def __init__(self, model, tspan=None, initials=None,
param_values=None, verbose=False, **kwargs):
# Get or create a base PySB logger for this module and model
self._logger = get_logger(self.__module__, model=model,
log_level=verbose)
self._logger.debug('Simulator created')
self._model = model
self.verbose = verbose
self.tout = None
# Base tspan (can be overridden per run)
self._tspan = tspan
# Per-run tspan, initials and param_values
self._run_tspan = None
self._run_initials = None
self._run_params = None
# Base initials and param values
self._params = None
self.param_values = param_values
self._initials = None
self.initials = initials
# Store init kwargs and run kwargs if needed for saving results
self._init_kwargs = kwargs
self._run_kwargs = None
@property
def model(self):
return self._model
@property
def tspan(self):
return self._run_tspan if self._run_tspan is not None else self._tspan
@tspan.setter
def tspan(self, new_tspan):
self._tspan = new_tspan
@staticmethod
def _num_sims_calc(initials_or_params):
""" Calculate number of simulations implied by initials or param
values """
if initials_or_params is None:
return None
if isinstance(initials_or_params, np.ndarray):
return len(initials_or_params)
first_entry = next(iter(initials_or_params.values()))
try:
return len(first_entry) # First entry is iterable
except TypeError:
return 1 # First entry is non-iterable, e.g. int, float
@property
def initials_length(self):
try:
return len(self.initials)
except SimulatorException:
# Network free simulators
if self._initials:
return len(list(self._initials.values())[0])
elif self._run_initials:
return len(list(self._run_initials.values())[0])
else:
return len(self.param_values)
def _update_initials_dict(self, initials_dict, initials_source, subs=None):
if isinstance(initials_source, Mapping):
# Can't just use .update() as we need to test
# equality with .is_equivalent_to()
for cp, value_obj in initials_source.items():
cp = as_complex_pattern(cp)
if any(existing_cp.is_equivalent_to(cp)
for existing_cp in initials_dict):
continue
if isinstance(value_obj, (Sequence, np.ndarray))\
and all(isinstance(v, numbers.Number) for v in value_obj):
value = value_obj
elif isinstance(value_obj, Expression):
value = [value_obj.expand_expr().xreplace(subs[sim]) for sim in range(len(subs))]
elif isinstance(value_obj, Parameter):
# Set parameter using param_values
pi = self._model.parameters.index(value_obj)
value = [self.param_values[sim][pi] for sim in range(len(self.param_values))]
else:
raise TypeError("Unexpected initial condition "
"value type: %s" % type(value_obj))
initials_dict[cp] = value
elif initials_source is not None:
# Update from array-like structure, which we can only do if we
# have the species available (e.g. not in network-free simulations)
if not self.model.species:
raise ValueError(
'Cannot update initials from an array-like source without '
'model species.')
for cp_idx, cp in enumerate(self.model.species):
if any(existing_cp.is_equivalent_to(cp) for existing_cp in
initials_dict):
continue
initials_dict[cp] = [initials_source[n][cp_idx]
for n in range(len(initials_source))]
return initials_dict
@property
def initials_dict(self):
n_sims = self._check_run_initials_vs_base_initials_length()
if n_sims == 1:
n_sims = len(self.param_values)
# Apply any per-run initial overrides
initials_dict = self._update_initials_dict({}, self._run_initials)
# Apply any base initial overrides
initials_dict = self._update_initials_dict(initials_dict,
self._initials)
model_initials = {ic.pattern: ic.value
for ic in self.model.initials}
# Otherwise, populate initials from the model
n_sims_params = len(self.param_values)
n_sims_actual = max(n_sims_params, n_sims)
# Get remaining initials from the model itself and
# self.param_values, if necessary
subs = None
if any(isinstance(v, Expression) for v in model_initials.values()):
# Only need parameter substitutions if model initials include
# expressions
subs = [
dict((p, pv[i]) for i, p in
enumerate(self._model.parameters))
for pv in self.param_values]
if len(subs) == 1 and n_sims_actual > 1:
subs = list(itertools.repeat(subs[0], n_sims_actual))
initials_dict = self._update_initials_dict(
initials_dict, model_initials, subs=subs
)
return initials_dict
def _check_run_initials_vs_base_initials_length(self):
# Otherwise, build the list from the model, and any overrides
# specified in self._initials and self._run_initials
n_sims_initials = self._num_sims_calc(self._initials)
n_sims_run = self._num_sims_calc(self._run_initials)
if n_sims_initials is not None and n_sims_run is not None \
and n_sims_run != n_sims_initials:
raise ValueError(
"The base initials set with self.initials imply {} "
"simulations, but the run() initials imply {} simulations."
" Either set self.initials=None, or change the number of "
"simulations in the run() initials".format(
n_sims_initials, n_sims_run))
if n_sims_initials is not None:
return n_sims_initials
elif n_sims_run is not None:
return n_sims_run
else:
return 1
@property
def initials(self):
if not self.model.species:
raise SimulatorException('No model species list - either '
'generate the model equations or use '
'initials_dict() for network-free '
'simulations')
# Check potential quick return options
if self._run_initials is not None:
if not isinstance(self._run_initials, Mapping) and \
self._initials is None:
return self._run_initials
elif not isinstance(self._initials, Mapping) and \
self._initials is not None:
return self._initials
# At this point (after dimensionality check), we can return
# self._run_initials if it's not a dictionary and not None
if self._run_initials is not None and not isinstance(
self._run_initials, Mapping):
return self._run_initials
n_sims_initials = self._check_run_initials_vs_base_initials_length()
n_sims_params = len(self.param_values)
n_sims_actual = max(n_sims_params, n_sims_initials)
y0 = np.full((n_sims_actual, len(self.model.species)), 0.0)
for species, vals in self.initials_dict.items():
species_index = self._model.get_species_index(species)
y0[:, species_index] = vals
return y0
@initials.setter
def initials(self, new_initials):
self._initials = self._process_incoming_initials(new_initials)
def _process_incoming_initials(self, new_initials):
if new_initials is None:
return None
# If new_initials is a pandas dataframe, convert to a dict
if pd and isinstance(new_initials, pd.DataFrame):
new_initials = new_initials.to_dict(orient='list')
# If new_initials is a list, convert to numpy array
if isinstance(new_initials, list):
new_initials = np.array(new_initials, copy=False)
# Check if new_initials is a dict, and if so validate the keys
# (ComplexPatterns)
if isinstance(new_initials, dict):
n_sims = 1
if len(new_initials) > 0:
n_sims = self._num_sims_calc(new_initials)
for cplx_pat, val in new_initials.items():
if not isinstance(cplx_pat, (MonomerPattern,
ComplexPattern)):
raise ValueError('Dictionary key %s is not a '
'MonomerPattern or ComplexPattern' %
repr(cplx_pat))
# if val is a number, convert it to a single-element array
if not isinstance(val, (Sequence, np.ndarray)):
val = [val]
new_initials[cplx_pat] = np.array(val)
# otherwise, check whether simulator supports multiple
# initial values :
if len(val) != n_sims:
raise ValueError("all arrays in new_initials dictionary "
"must be equal length")
if not np.isfinite(val).all():
raise ValueError('Please check initial {} for non-finite '
'values'.format(cplx_pat))
elif isinstance(new_initials, np.ndarray):
# if new_initials is a 1D array, convert to a 2D array of length 1
if len(new_initials.shape) == 1:
new_initials = np.resize(new_initials, (1, len(new_initials)))
n_sims = new_initials.shape[0]
# make sure number of initials values equals len(model.species)
if new_initials.shape[1] != len(self._model.species):
raise ValueError("new_initials must be the same length as "
"model.species")
if not np.isfinite(new_initials).all():
raise ValueError('Please check initials array '
'for non-finite values')
else:
raise ValueError(
'Implicit conversion of data type "{}" is not '
'supported. Please supply initials as a numpy array, list, '
'or a pandas DataFrame.'.format(type(new_initials)))
if n_sims > 1:
if not self._supports['multi_initials']:
raise ValueError(
self.__class__.__name__ +
" does not support multiple initial values at this time.")
if 1 < len(self.param_values) != n_sims:
raise ValueError(
'Cannot set initials for {} simulations '
'when param_values has been set for {} '
'simulations'.format(
n_sims, len(self.param_values)))
return new_initials
@property
def param_values(self):
if not self.model._derived_parameters:
if self._params is not None and \
not isinstance(self._params, dict) and \
self._run_params is None:
return self._params
elif self._run_params is not None and \
not isinstance(self._run_params, dict) and \
self._params is None:
return self._run_params
# create parameter vector from the values in the model
param_values_dict = {}
n_sims = self._num_sims_calc(self._params)
if isinstance(self._params, dict):
param_values_dict.update(self._params)
elif isinstance(self._params, np.ndarray):
param_values_dict = dict(zip(
[p.name for p in self._model.parameters], self._params.T))
n_sims_run = self._num_sims_calc(self._run_params)
if n_sims is None:
n_sims = n_sims_run
elif n_sims_run is not None and n_sims_run != n_sims:
raise ValueError(
"The base parameters set with self.param_values imply "
"{} simulations, but the run() params imply {} "
"simulations. Either set self.param_values=None, or "
"change the number of simulations in the run() params"
.format(n_sims, n_sims_run))
# At this point (after dimensionality check) we can return the
# _run_params, if it's not a dict
if self._run_params is not None:
if not isinstance(self._run_params, dict):
if not self._model._derived_parameters:
return self._run_params
else:
param_values_dict.update(dict(zip(
self.model.parameters.keys(), self._run_params
)))
else:
param_values_dict.update(self._run_params)
if n_sims is None:
n_sims = 1
# Get the base parameters from the model
param_values = np.array(
[p.value for p in self._model.parameters] +
[p.value for p in self._model._derived_parameters]
)
param_values = np.repeat([param_values], n_sims, axis=0)
# Process overrides
for key in param_values_dict.keys():
try:
pi = self._model.parameters.index(
self._model.parameters[key])
except KeyError:
raise IndexError("new_params dictionary has unknown "
"parameter name (%s)" % key)
# loop over n_sims
for n in range(n_sims):
param_values[n][pi] = param_values_dict[key][n]
return param_values
@param_values.setter
def param_values(self, new_params):
self._params = self._process_incoming_params(new_params)
def _process_incoming_params(self, new_params):
if new_params is None:
return None
# Convert pandas dataframe to dictionary
if pd and isinstance(new_params, pd.DataFrame):
new_params = new_params.to_dict(orient='list')
# If new_params is a list, convert to numpy array
if isinstance(new_params, list):
new_params = np.array(new_params)
if isinstance(new_params, dict):
n_sims = 1
if len(new_params) > 0:
n_sims = self._num_sims_calc(new_params)
for key, val in new_params.items():
if key not in self._model.parameters.keys():
raise IndexError("new_params dictionary has unknown "
"parameter name (%s)" % key)
# if val is a number, convert it to a single-element array
if not isinstance(val, Sequence):
val = [val]
new_params[key] = np.array(val)
# Check all elements are the same length
if len(val) != n_sims:
raise ValueError("all arrays in params dictionary "
"must be equal length")
for value in val:
try:
self._model.parameters[key].check_value(value)
except ValueError as e:
raise InconsistentParameterError(
key, value, str(e)
)
elif isinstance(new_params, np.ndarray):
# if new_params is a 1D array, convert to a 2D array of length 1
if len(new_params.shape) == 1:
new_params = np.resize(new_params, (1, len(new_params)))
n_sims = new_params.shape[0]
# make sure number of param values equals len(model.parameters)
if new_params.shape[1] != len(self._model.parameters):
raise ValueError("new_params must be the same length as "
"model.parameters")
for isim in range(n_sims):
for param, value in zip(self._model.parameters,
new_params[isim, :]):
try:
param.check_value(value)
except ValueError as e:
raise InconsistentParameterError(
param.name, value, str(e)
)
else:
raise ValueError(
'Implicit conversion of data type "{}" is not '
'supported. Please supply parameters as a numpy array, list, '
'or a pandas DataFrame.'.format(type(new_params)))
# Check whether simulator supports multiple param_values
if n_sims > 1 and not self._supports['multi_param_values']:
raise ValueError(
self.__class__.__name__ +
" does not support multiple parameter values at this time.")
return new_params
def _reset_run_overrides(self):
"""
Reset any single-run tspan, initials, param_values
When calling run(), the user can specify tspan, initials and
param_values, which are only used for a single run. This method
resets those overrides after the run is complete (called from
:func:`SimulationResult.__init__`).
"""
self._run_tspan = None
self._run_initials = None
self._run_params = None
@abstractmethod
def run(self, tspan=None, initials=None, param_values=None,
_run_kwargs=None):
"""Run a simulation.
Notes for developers implementing Simulator subclasses:
Implementations should return a :class:`.SimulationResult` object.
Subclasses should pass any additional arguments run as a dictonary
to the `_run_kwargs` argument when calling the superclass's run
method. If the run method has variable keyword arguments, this can
be achieved by passing `_run_kwargs=locals()` to the superclass's
run method. The run kwargs are used for reference when saving and
loading SimulationResults to disk. They aren't compulsory, but not
including them will generate a warning. To suppress (e.g. if there
are no additional arguments), set `_run_kwargs=[]`.
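A minimal subclass sketch (hypothetical, for illustration only)::
    class MySimulator(Simulator):
        def run(self, tspan=None, initials=None, param_values=None,
                **kwargs):
            super(MySimulator, self).run(tspan=tspan,
                                         initials=initials,
                                         param_values=param_values,
                                         _run_kwargs=locals())
            # ... integrate the model here and return a
            # SimulationResult ...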
"""
self._logger.info('Simulation(s) started')
if _run_kwargs:
# Don't store these arguments twice
_run_kwargs.pop('self')
_run_kwargs.pop('initials', None)
_run_kwargs.pop('param_values', None)
_run_kwargs.pop('tspan', None)
self._run_kwargs = _run_kwargs
elif _run_kwargs is None:
self._logger.warning(
'{} has not passed any additional run arguments to '
'_run_kwargs. Instructions are included in the Simulation '
'base class run method docstring.'.format(
self.__class__.__name__))
self._run_tspan = tspan
if self.tspan is None:
raise ValueError("tspan must be defined before "
"simulation can run")
self._run_params = self._process_incoming_params(param_values)
self._run_initials = self._process_incoming_initials(initials)
# If only one set of param_values, run all simulations
# with the same parameters
if len(self.param_values) == 1 and self.initials_length > 1:
new_params = np.repeat(self.param_values,
self.initials_length,
axis=0)
self._run_params = new_params
# Error checks on 'param_values' and 'initials'
if len(self.param_values) != self.initials_length:
raise ValueError(
"'param_values' and 'initials' must be equal lengths.\n"
"len(param_values): %d\n"
"len(initials): %d" %
(len(self.param_values), self.initials_length))
elif len(self.param_values.shape) != 2 or \
self.param_values.shape[1] != (
len(self._model.parameters) +
len(self._model._derived_parameters)):
raise ValueError(
"'param_values' must be a 2D array of dimension N_SIMS x "
"len(model.parameters).\n"
"param_values.shape: " + str(self.param_values.shape) +
"\nlen(model.parameters): %d" %
len(self._model.parameters))
if self.model.species and (len(self.initials.shape) != 2 or
self.initials.shape[1] != len(self._model.species)):
raise ValueError(
"'initials' must be a 2D array of dimension N_SIMS x "
"len(model.species).\n"
"initials.shape: " + str(self.initials.shape) +
"\nlen(model.species): %d" % len(self._model.species))
return None
class SimulationResult(object):
"""
Results of a simulation with properties and methods to access them.
.. warning::
Please note that the interface for this class is considered
experimental and may change without warning as PySB is updated.
Notes
-----
In the attribute descriptions, a "trajectory set" is a 2D numpy array,
time on the first axis and species on the second axis, with each element
containing the concentration or count of the species at the specified time.
A list of trajectory sets contains a trajectory set for each simulation.
Parameters
----------
simulator : Simulator
The simulator object that generated the trajectories
tout: list-like
Time points returned by the simulator (may be different from ``tspan``
if simulation is interrupted for some reason).
trajectories : list or numpy.ndarray
A set of species trajectories from a simulation. Should either be a
list of 2D numpy arrays or a single 3D numpy array.
squeeze : bool, optional (default: True)
Return trajectories as a 2D array, rather than a 3d array, if only
a single simulation was performed.
simulations_per_param_set : int
Number of trajectories per parameter set. Typically always 1 for
deterministic simulators (e.g. ODE), but with stochastic simulators
multiple trajectories per parameter/initial condition set are often
desired.
model: pysb.Model
initials: numpy.ndarray
param_values: numpy.ndarray
model, initials, param_values are an alternative constructor
mechanism used when loading SimulationResults from files (see
:func:`SimulationResult.load`). Setting just the simulator argument
instead of these arguments is recommended.
Examples
--------
The following examples use a simple model with three observables and one
expression, with a single simulation.
>>> from pysb.examples.expression_observables import model
>>> from pysb.simulator import ScipyOdeSimulator
>>> import numpy as np
>>> np.set_printoptions(precision=4)
>>> sim = ScipyOdeSimulator(model, tspan=np.linspace(0, 40, 10), \
integrator_options={'atol': 1e-20})
>>> simulation_result = sim.run()
``simulation_result`` is a :class:`SimulationResult` object. An
observable can be accessed like so:
>>> print(simulation_result.observables['Bax_c0']) \
#doctest: +NORMALIZE_WHITESPACE
[1.0000e+00 1.1744e-02 1.3791e-04 1.6196e-06 1.9020e-08
2.2337e-10 2.6232e-12 3.0806e-14 3.6178e-16 4.2492e-18]
It is also possible to retrieve the value of all observables at a
particular time point, e.g. the final concentrations:
>>> print(simulation_result.observables[-1]) \
#doctest: +SKIP
(4.2492e-18, 1.6996e-16, 1.)
Expressions are read in the same way as observables:
>>> print(simulation_result.expressions['NBD_signal']) \
#doctest: +NORMALIZE_WHITESPACE
[0. 4.7847 4.9956 4.9999 5. 5. 5. 5. 5. 5. ]
The species trajectories can be accessed as a numpy ndarray:
>>> print(simulation_result.species) #doctest: +NORMALIZE_WHITESPACE
[[1.0000e+00 0.0000e+00 0.0000e+00]
[1.1744e-02 5.2194e-02 9.3606e-01]
[1.3791e-04 1.2259e-03 9.9864e-01]
[1.6196e-06 2.1595e-05 9.9998e-01]
[1.9020e-08 3.3814e-07 1.0000e+00]
[2.2337e-10 4.9637e-09 1.0000e+00]
[2.6232e-12 6.9951e-11 1.0000e+00]
[3.0806e-14 9.5840e-13 1.0000e+00]
[3.6178e-16 1.2863e-14 1.0000e+00]
[4.2492e-18 1.6996e-16 1.0000e+00]]
Species, observables and expressions can be combined into a single numpy
ndarray and accessed similarly. Here, the initial concentrations of all
these entities are examined:
>>> print(simulation_result.all[0]) #doctest: +SKIP
( 1., 0., 0., 1., 0., 0., 0.)
The ``all`` array can be accessed as a pandas DataFrame object,
which allows for more convenient indexing and access to pandas advanced
functionality, such as indexing and slicing. Here, the concentrations of
the observable ``Bax_c0`` and the expression ``NBD_signal`` are read at
time points between 5 and 15 seconds:
>>> df = simulation_result.dataframe
>>> print(df.loc[5:15, ['Bax_c0', 'NBD_signal']]) \
#doctest: +NORMALIZE_WHITESPACE
Bax_c0 NBD_signal
time
8.888889 0.000138 4.995633
13.333333 0.000002 4.999927
"""
CUSTOM_ATTR_PREFIX = 'usrattr_'
def __init__(self, simulator, tout, trajectories=None,
observables_and_expressions=None, squeeze=True,
simulations_per_param_set=1,
model=None, initials=None, param_values=None):
if simulator:
simulator._logger.debug('SimulationResult constructor started')
self._param_values = simulator.param_values.copy()
try:
self._initials = simulator.initials.copy()
except SimulatorException:
# Network free simulations don't have initials list, only dict
self._initials = simulator.initials_dict.copy()
self._model = copy.deepcopy(simulator._model)
self.simulator_class = simulator.__class__
self.init_kwargs = copy.deepcopy(simulator._init_kwargs)
self.run_kwargs = copy.deepcopy(simulator._run_kwargs)
else:
self._param_values = param_values
self._initials = initials
self._model = model
self.simulator_class = None
self.init_kwargs = {}
self.run_kwargs = {}
self.squeeze = squeeze
self.tout = np.asarray(tout)
self._yfull = None
self.n_sims_per_parameter_set = simulations_per_param_set
self.pysb_version = PYSB_VERSION
self.timestamp = datetime.now()
self.custom_attrs = {}
if trajectories is None and observables_and_expressions is None:
raise ValueError('Need to supply at least one of species '
'trajectories or observables_and_expressions')
if trajectories is not None and len(trajectories) > 0:
# Validate incoming trajectories
if getattr(trajectories, 'ndim', None) == 3:
# trajectories is a 3D array, create a list of 2D arrays
# This is just a view and doesn't copy the data
self._y = [tr for tr in trajectories]
else:
# Not a 3D array, check for a list of 2D arrays
try:
if any(tr.ndim != 2 for tr in trajectories):
raise AttributeError
except (AttributeError, TypeError):
raise ValueError("trajectories should be a 3D array or a "
"list of 2D arrays")
self._y = trajectories
self._nsims = len(self._y)
if len(self.tout) != self.nsims:
raise ValueError("Simulator tout should be the same length as "
"trajectories")
for i in range(self.nsims):
if len(self.tout[i]) != self._y[i].shape[0]:
raise ValueError("The number of time points in tout[{0}] "
"should match the trajectories array for "
"simulation {0}".format(i))
if self._y[i].shape[1] != len(self._model.species):
raise ValueError("The number of species in trajectory {0} "
"should match length of "
"model.species".format(i))
else:
self._y = None
# Calculate ``yobs`` and ``yexpr`` based on values of ``y``
exprs = self._model.expressions_dynamic(include_local=False)
expr_names = [expr.name for expr in exprs]
model_obs = self._model.observables
obs_names = list(model_obs.keys())
param_names = list(p.name for p in self._model.parameters)
if not _allow_unicode_recarray():
for name_list, name_type in zip(
(expr_names, obs_names, param_names),
('Expression', 'Observable', 'Parameter')):
for i, name in enumerate(name_list):
try:
name_list[i] = name.encode('ascii')
except UnicodeEncodeError:
error_msg = 'Non-ASCII compatible ' + \
'%s names not allowed' % name_type
raise ValueError(error_msg)
yobs_dtype = (list(zip(obs_names, itertools.repeat(float)))
if obs_names else float)
yexpr_dtype = (list(zip(expr_names, itertools.repeat(float)))
if expr_names else float)
if observables_and_expressions:
# Observables and expression values are used as supplied
self._nsims = len(observables_and_expressions)
self._yobs_view = [observables_and_expressions[n][:, 0:(len(
self._model.observables))] for n in range(self.nsims)]
self._yexpr_view = [observables_and_expressions[n][:, (len(
self._model.observables)):] for n in range(self.nsims)]
self._yobs = [self._yobs_view[n].reshape(
len(tout[n]) * len(obs_names)).view(dtype=yobs_dtype) for n
in range(self.nsims)]
self._yexpr = [self._yexpr_view[n].reshape(
len(tout[n]) * len(expr_names)).view(dtype=yexpr_dtype) for n
in range(self.nsims)]
else:
self._yobs = [np.ndarray((len(self.tout[n]),), dtype=yobs_dtype) if obs_names
else np.ndarray((len(self.tout[n]), 0), dtype=yobs_dtype)
for n in range(self.nsims)]
self._yobs_view = [self._yobs[n].view(float).
reshape(len(self._yobs[n]), -1) for n in range(
self.nsims)]
self._yexpr = [np.ndarray((len(self.tout[n]),), dtype=yexpr_dtype) if expr_names
else np.ndarray((len(self.tout[n]), 0), dtype=yexpr_dtype)
for n in range(self.nsims)]
self._yexpr_view = [self._yexpr[n].view(float).reshape(len(
self._yexpr[n]), -1) for n in range(self.nsims)]
# loop over simulations
sym_names = obs_names + param_names
expanded_exprs = [sympy.lambdify(sym_names, expr.expand_expr(),
"numpy") for expr in exprs]
for n in range(self.nsims):
if simulator:
simulator._logger.log(EXTENDED_DEBUG,
'Evaluating exprs/obs %d/%d'
% (n + 1, self.nsims))
# observables
for i, obs in enumerate(model_obs):
self._yobs_view[n][:, i] = (
self._y[n][:, obs.species] * obs.coefficients).sum(axis=1)
# expressions
sym_dict = dict((k, self._yobs[n][k]) for k in obs_names)
sym_dict.update(dict((p.name, self.param_values[
n // self.n_sims_per_parameter_set][i]) for i, p in
enumerate(self._model.parameters)))
for i, expr in enumerate(exprs):
self._yexpr_view[n][:, i] = expanded_exprs[i](**sym_dict)
if simulator:
simulator._reset_run_overrides()
simulator._logger.debug('SimulationResult constructor finished')
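    # Note on the constructor's expression loop above: each dynamic
    # expression is compiled once with sympy.lambdify over the observable
    # and parameter symbols, then evaluated per simulation against that
    # run's observable trajectories and parameter values.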
def _squeeze_output(self, trajectories):
"""
        Reduces trajectories to a 2D matrix if only one simulation is
        present. Can be disabled by setting self.squeeze to False.
"""
if self.nsims == 1 and self.squeeze:
return trajectories[0]
else:
return trajectories
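    # Illustration of the squeeze behaviour (shapes are assumptions):
    #   nsims == 1 and squeeze=True  -> one 2D (n_times, n_cols) array
    #   otherwise                    -> a list of such arrays, one per run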
@property
def nsims(self):
""" The number of simulations in this SimulationResult """
return self._nsims
@property
def all(self):
"""
Aggregate species, observables, and expressions trajectories into
a numpy.ndarray with record-style data-type for return to the user.
"""
if self._yfull is None:
if self._y is None:
yfull_dtype = []
else:
sp_names = ['__s%d' % i
for i in range(len(self._model.species))]
yfull_dtype = list(zip(sp_names, itertools.repeat(float)))
if len(self._model.observables):
yfull_dtype += self._yobs[0].dtype.descr
if len(self._model.expressions_dynamic()):
yfull_dtype += self._yexpr[0].dtype.descr
yfull = []
# loop over simulations
for n in range(self.nsims):
yfull.append(np.ndarray(len(self.tout[n]), yfull_dtype))
yfull_view = yfull[n].view(float).reshape((len(yfull[n]), -1))
n_sp = self._y[n].shape[1] if self._y else 0
n_ob = self._yobs_view[n].shape[1]
n_ex = self._yexpr_view[n].shape[1]
if self._y:
yfull_view[:, :n_sp] = self._y[n]
yfull_view[:, n_sp:n_sp + n_ob] = self._yobs_view[n]
yfull_view[:, n_sp + n_ob:n_sp + n_ob + n_ex] = \
self._yexpr_view[n]
self._yfull = yfull
return self._squeeze_output(self._yfull)
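    # Sketch of record-style access to ``all`` (the observable name is the
    # one used in the class docstring; '__s0'-style species fields are
    # present only when species trajectories exist):
    #   rec = simulation_result.all
    #   rec['Bax_c0']    # one observable's full time course
    #   rec[0]           # every field at the first time point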
@property
def dataframe(self):
"""
A conversion of the trajectory sets (species, observables and
expressions for all simulations) into a single
:py:class:`pandas.DataFrame`.
"""
if pd is None:
raise Exception('Please "pip install pandas" for this feature')
sim_ids = (np.repeat(range(self.nsims), [len(t) for t in self.tout]))
times = np.concatenate(self.tout)
if self.nsims == 1 and self.squeeze:
idx = pd.Index(times, name='time')
else:
idx = pd.MultiIndex.from_tuples(list(zip(sim_ids, times)),
names=['simulation', 'time'])
simdata = self.all
if not isinstance(simdata, np.ndarray):
simdata = np.concatenate(simdata)
return pd.DataFrame(simdata, index=idx)
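    # Sketch: with several simulations (or squeeze=False) the DataFrame
    # carries a (simulation, time) MultiIndex, so one run can be pulled
    # out with df.xs(0, level='simulation'); with a single squeezed run
    # the index is just 'time', as in the class docstring example.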
@property
def species(self):
"""
List of trajectory sets. The first dimension contains species.
"""
if self._y is None:
raise ValueError('No trajectories are available for network-free '
'simulations')
return self._squeeze_output(self._y)
@property
def observables(self):
"""
List of trajectory sets. The first dimension contains observables.
"""
if not self._model.observables:
raise ValueError('Model has no observables')
return self._squeeze_output(self._yobs)
def observable(self, pattern):
"""
Calculate a pattern's trajectories without adding to model
This method calculates an observable "on demand" using
any supplied MonomerPattern or ComplexPattern against the simulation
result, without re-running the simulation.
Note that the monomers within the supplied pattern are reconciled
with the SimulationResult's internal copy of the model by name. This
method only works on simulations which calculate species
trajectories (i.e. it will not work on network-free simulations).
Raises a ValueError if the pattern does not match at least one species.
Parameters
----------
pattern: pysb.MonomerPattern or pysb.ComplexPattern
An observable pattern to match
Returns
-------
pandas.Series
Series containing the simulation trajectories for the specified
observable
Examples
--------
>>> from pysb import ANY
>>> from pysb.examples import earm_1_0
>>> from pysb.simulator import ScipyOdeSimulator
>>> simres = ScipyOdeSimulator(earm_1_0.model, tspan=range(5)).run()
>>> m = earm_1_0.model.monomers
Observable of bound Bid:
>>> simres.observable(m.Bid(b=ANY))
time
0 0.000000e+00
1 1.190933e-12
2 2.768582e-11
3 1.609716e-10
4 5.320530e-10
dtype: float64
Observable of AMito bound to mCytoC:
>>> simres.observable(m.AMito(b=1) % m.mCytoC(b=1))
time
0 0.000000e+00
1 1.477319e-77
2 1.669917e-71
3 5.076939e-69
4 1.157400e-66
dtype: float64
"""
# Adjust the supplied pattern's monomer objects to match the
# simulationresult's internal model
if isinstance(pattern, MonomerPattern):
self._update_monomer_pattern(pattern)
elif isinstance(pattern, ComplexPattern):
for mp in pattern.monomer_patterns:
self._update_monomer_pattern(mp)
else:
raise ValueError('The pattern must be a MonomerPattern or '
'ComplexPattern')
if self._y is None:
raise ValueError('On demand observables can only be calculated '
'on simulations with species trajectories')
obs_matches = SpeciesPatternMatcher(self._model).match(
pattern, index=True, counts=True)
if not obs_matches:
raise ValueError('No species match the supplied observable '
'pattern')
return self.dataframe.iloc[:, list(obs_matches.keys())].multiply(
list(obs_matches.values())).sum(axis=1)
def _update_monomer_pattern(self, pattern):
""" Update a pattern's monomer objects to use internal model
Internal function for in-place update of a pattern to replace its
monomers with those from SimulationResult's model, matching by name.
Raises ValueError if no monomer with the specified name is in the
model.
"""
mon_name = pattern.monomer.name
try:
new_mon = self._model.monomers[mon_name]
except KeyError:
raise ValueError('There was no monomer called "{}" in the model '
'"{}" at the time of simulation'.format(
mon_name, self._model.name))
pattern.monomer = new_mon
@property
def expressions(self):
"""
List of trajectory sets. The first dimension contains expressions.
"""
if not self._model.expressions_dynamic():
raise ValueError('Model has no dynamic expressions')
return self._squeeze_output(self._yexpr)
@property
def initials(self):
return self._initials
@property
def param_values(self):
return self._param_values
def save(self, filename, dataset_name=None, group_name=None,
append=False, include_obs_exprs=False):
"""
Save a SimulationResult to a file (HDF5 format)
HDF5 is a hierarchical, binary storage format well suited to storing
matrix-like data. Our implementation requires the h5py package.
Each SimulationResult is treated as an HDF5 dataset, stored within a
group which is specific to a model. In this way, it is possible to save
multiple SimulationResults for a specific model.
A group is first created in the HDF file root (see group_name
        argument). Within that group, a dataset "_model_json" holds a JSON
        version of the PySB model. SimulationResults are stored as groups
        within the model group.
        The file hierarchy under group_name/dataset_name/ then consists of
        the following gzip-compressed HDF5 datasets: trajectories,
param_values, initials, tout, observables (optional) and expressions
(optional); and the following attributes:
simulator_class (pickled Class), simulator_kwargs (pickled dict),
squeeze (bool), simulations_per_param_set (int), pysb_version (str),
timestamp (ISO 8601 format).
Custom attributes can be stored in the SimulationResult's
`custom_attrs` dictionary. Keys should be strings, values can be any
picklable object. When saved to HDF5, these custom attributes will
be prefixed with ``usrattr_``.
Parameters
----------
filename: str
Filename to which the data will be saved
dataset_name: str or None
Dataset name. If None, it will default to 'result'. If the
dataset_name already exists within the group, a ValueError is
raised.
group_name: str or None
Group name. If None, will default to the name of the model.
append: bool
If False, raise IOError if the specified file already exists. If
True, append to existing file (or create if it doesn't exist).
include_obs_exprs: bool
Whether to save observables and expressions in the file or not.
If they are not included, they can be recreated from the model
and species trajectories when loaded back into PySB, but you may
wish to include them for use with external software, or if you
have complex expressions which take a long time to compute.
"""
if h5py is None:
raise Exception('Please install the h5py package for this feature')
if self._y is None and not include_obs_exprs:
warn('This SimulationResult has no trajectories - '
'you will need to set include_obs_exprs=True if '
'you wish to save observables and expressions')
if group_name is None:
group_name = self._model.name
if dataset_name is None:
dataset_name = 'result'
# np.void maps to bytes in HDF5.
enpickle = lambda obj: np.void(pickle.dumps(obj, -1))
model_json = JsonExporter(self._model).export(include_netgen=True)
with h5py.File(filename, 'a' if append else 'w-') as hdf:
# Get or create the group
try:
grp = hdf.create_group(group_name)
grp.create_dataset('_model_json', data=model_json)
if '_model' in grp:
raise ValueError()
except ValueError:
grp = hdf[group_name]
if '_model_json' in grp:
model = model_from_json(grp['_model_json'][()])
else:
with _patch_model_setstate():
model = pickle.loads(grp['_model'][()])
if model.name != self._model.name:
raise ValueError('SimulationResult model has name "{}", '
'but the model in HDF5 file group "{}" '
'has name "{}"'.format(self._model.name,
group_name,
model.name))
# Create the result dataset, which is actually a nested HDF group
dset = grp.create_group(dataset_name)
if self._y is not None:
dset.create_dataset('trajectories', data=self._y,
compression='gzip', shuffle=True)
if include_obs_exprs:
dset.create_dataset('observables', data=self._yobs_view,
compression='gzip', shuffle=True)
dset.create_dataset('expressions', data=self._yexpr_view,
compression='gzip', shuffle=True)
dset.create_dataset('param_values', data=self.param_values,
compression='gzip', shuffle=True)
if isinstance(self.initials, np.ndarray):
dset.create_dataset('initials', data=self.initials,
compression='gzip', shuffle=True)
else:
dset.create_dataset('initials_dict', data=enpickle(
self.initials))
dset.create_dataset('tout', data=self.tout,
compression='gzip')
dset.attrs['simulator_class'] = enpickle(self.simulator_class)
dset.attrs['init_kwargs'] = enpickle(self.init_kwargs)
dset.attrs['run_kwargs'] = enpickle(self.run_kwargs)
dset.attrs['squeeze'] = self.squeeze
dset.attrs['simulations_per_param_set'] = \
self.n_sims_per_parameter_set
dset.attrs['pysb_version'] = self.pysb_version
dset.attrs['timestamp'] = datetime.isoformat(
self.timestamp)
# This is the range of ints that can be natively encoded in HDF5.
int_min = np.iinfo(np.int64).min
int_max = np.iinfo(np.uint64).max
for attr_name, attr_val in self.custom_attrs.items():
# Pass HDF5-native values straight through, pickling others.
if (not (isinstance(attr_val,
(str, bytes, float, complex))
or (isinstance(attr_val, numbers.Integral)
and int_min <= attr_val <= int_max))):
attr_val = enpickle(attr_val)
dset.attrs[self.CUSTOM_ATTR_PREFIX + attr_name] = attr_val
@classmethod
def load(cls, filename, dataset_name=None, group_name=None):
"""
Load a SimulationResult from a file (HDF5 format)
For a description of the file format see :func:`save`
Parameters
----------
filename: str
Filename from which to load data
dataset_name: str or None
Dataset name. Can be left as None when the group specified only
contains one dataset, which will then be selected. If None and
more than one dataset is in the group, a ValueError is raised.
group_name: str or None
Group name. This is typically the name of the model. Can be left as
None when the file only contains one group, which will then be
            selected. If None and more than one group is in the file, a
            ValueError is raised.
Returns
-------
SimulationResult
Set of trajectories and associated metadata loaded from the file
"""
if h5py is None:
raise Exception('Please "pip install h5py" for this feature')
with h5py.File(filename, 'r') as hdf:
if group_name is None:
groups = hdf.keys()
if len(groups) > 1:
raise ValueError("group_name must be specified when file "
"contains more than one group. Options "
"are: {}".format(str(groups)))
group_name = next(iter(hdf))
grp = hdf[group_name]
if dataset_name is None:
datasets = [k for k in grp.keys() if k not in
('_model', '_model_json')]
if len(datasets) > 1:
raise ValueError("dataset_name must be specified when "
"group contains more than one dataset. "
"Options are: {}".format(str(datasets)))
dataset_name = datasets[0]
dset = grp[dataset_name]
obs_and_exprs = None
if 'observables' in dset.keys():
obs_and_exprs = list(dset['observables'][:])
if 'expressions' in dset.keys():
exprs = dset['expressions'][:]
if obs_and_exprs is None:
obs_and_exprs = list(exprs)
else:
for i in range(len(obs_and_exprs)):
obs_and_exprs[i] = np.concatenate(
[obs_and_exprs[i], exprs[i]],
axis=1
)
trajectories = None
try:
trajectories = dset['trajectories'][:]
except KeyError:
pass
try:
initials = dset['initials'][:]
except KeyError:
initials = pickle.loads(dset['initials_dict'][()])
if '_model_json' in grp:
model = model_from_json(grp['_model_json'][()])
else:
warn('The SimulationResult file uses an old model '
'format (pickled). It\'s recommended you re-save '
'the SimulationResult to use the new format (JSON).')
with _patch_model_setstate():
model = pickle.loads(grp['_model'][()])
simres = cls(
simulator=None,
model=model,
initials=initials,
param_values=dset['param_values'][:],
tout=dset['tout'][:],
trajectories=trajectories,
observables_and_expressions=obs_and_exprs,
squeeze=dset.attrs['squeeze'],
simulations_per_param_set=dset.attrs[
'simulations_per_param_set']
)
simres.pysb_version = dset.attrs['pysb_version']
simres.timestamp = dateutil.parser.parse(
dset.attrs['timestamp'])
simres.simulator_class = pickle.loads(
dset.attrs['simulator_class'])
simres.init_kwargs = pickle.loads(dset.attrs['init_kwargs'])
simres.run_kwargs = pickle.loads(dset.attrs['run_kwargs'])
for attr_name in dset.attrs.keys():
if attr_name.startswith(cls.CUSTOM_ATTR_PREFIX):
orig_name = attr_name[len(cls.CUSTOM_ATTR_PREFIX):]
attr_val = dset.attrs[attr_name]
# Restore objects that were pickled for storage.
if isinstance(attr_val, np.void):
attr_val = pickle.loads(attr_val)
simres.custom_attrs[orig_name] = attr_val
return simres
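# A minimal save/load round-trip sketch for the class above; it is not part
# of the original module. The Robertson example model and the output file
# name are assumptions for illustration, and the helper is never called.
def _example_save_load_roundtrip():
    import numpy as np
    from pysb.examples.robertson import model
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 100, 101)
    res = ScipyOdeSimulator(model, tspan=tspan).run()
    # Stored in HDF5 with the ``usrattr_`` prefix (see save() above).
    res.custom_attrs['note'] = 'example run'
    res.save('robertson_results.h5', dataset_name='run1', append=True)
    return SimulationResult.load('robertson_results.h5', dataset_name='run1')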
def _allow_unicode_recarray():
"""Return True if numpy recarray can take unicode data type.
In python 2, numpy doesn't allow unicode strings as names in arrays even
if they are ascii encodeable. This function tests this directly.
"""
try:
np.ndarray((1,), dtype=[(u'X', float)])
except TypeError:
return False
return True
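# The probe above just attempts the construction below; on Python 2 with
# older numpy it raises TypeError, which is why observable/expression/
# parameter names get ASCII-encoded before building the record dtypes:
#   np.ndarray((1,), dtype=[(u'X', float)])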
def _model_setstate_monkey_patch(self, state):
"""Monkey patch for Model.__setstate__ for restoring from older pickles"""
# restore the 'model' weakrefs on all components
self.__dict__.update(state)
# Set "tags" attribute for older, pickled models
self.__dict__.setdefault('tags', ComponentSet())
for c in self.all_components():
c.model = weakref.ref(self)
@contextmanager
def _patch_model_setstate():
old_setstate = Model.__setstate__
Model.__setstate__ = _model_setstate_monkey_patch
try:
yield
finally:
Model.__setstate__ = old_setstate
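# Usage sketch for the context manager above; ``raw`` is a hypothetical
# byte string read from a legacy '_model' dataset, and this helper is not
# called anywhere in the module.
def _example_load_legacy_model(raw):
    with _patch_model_setstate():
        return pickle.loads(raw)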
| bsd-2-clause |
profxj/old_xastropy | xastropy/xguis/img_widgets.py | 5 | 58283 | """
#;+
#; NAME:
#; img_widgets
#; Version 1.0
#;
#; PURPOSE:
#; Module for IMG widgets with QT
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys, imp
import matplotlib.pyplot as plt
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
# astropy
from astropy.table.table import Table
from astropy import constants as const
from astropy import units as u
# xastropy
from xastropy.xutils import xdebug as xdb
from xastropy.plotting import utils as xputils
from xastropy.xguis import utils as xguiu
# The module-level helpers below also use xspec, xstats, xiaa, LLS_System,
# Spectrum1D and StdDevUncertainty, none of which this copy imports at
# module scope. The imports below mirror the sibling spec_widgets module;
# the last four module paths are assumptions.
from xastropy import spec as xspec
from xastropy import stats as xstats
from xastropy.igm.abs_sys import abssys_utils as xiaa
from xastropy.igm.abs_sys.lls_utils import LLS_System
from specutils import Spectrum1D
from astropy.nddata import StdDevUncertainty
xa_path = imp.find_module('xastropy')[1]
# class ExamineSpecWidget
# class PlotLinesWidget
class ExamineImgWidget(QtGui.QWidget):
''' Widget to show an image and fiddle about
12-Apr-2015 by JXP
'''
import xastropy.xutils as xxu
from xastropy import stats as xstats
def __init__(self, ispec, parent=None, status=None, llist=None,
abs_sys=None, norm=True, second_file=None, zsys=None):
'''
        ispec : Spectrum1D or spectrum filename (passed to read_spec)
'''
super(ExamineImgWidget, self).__init__(parent)
# Image
spec, spec_fil = read_spec(ispec, second_file=second_file)
self.orig_spec = spec # For smoothing
self.spec = self.orig_spec
# Abs Systems
if abs_sys is None:
self.abs_sys = []
else:
self.abs_sys = abs_sys
self.norm = norm
self.psdict = {} # Dict for spectra plotting
self.adict = {} # Dict for analysis
self.init_spec()
self.xval = None # Used with velplt
# Status Bar?
if not status is None:
self.statusBar = status
# Line List?
if llist is None:
self.llist = {'Plot': False, 'List': 'None', 'z': 0.}
else:
self.llist = llist
# zsys
if not zsys is None:
self.llist['z'] = zsys
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 150 # 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Make two plots
self.ax = self.fig.add_subplot(1,1,1)
self.fig.subplots_adjust(hspace=0.1, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
#
# Draw on init
self.on_draw()
# Setup the spectrum plotting info
def init_spec(self):
#xy min/max
xmin = np.min(self.spec.dispersion).value
xmax = np.max(self.spec.dispersion).value
ymed = np.median(self.spec.flux).value
ymin = 0. - 0.1*ymed
ymax = ymed * 1.5
#
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.psdict['xmnx'] = np.array([xmin,xmax])
self.psdict['ymnx'] = [ymin,ymax]
self.psdict['sv_xy'] = [ [xmin,xmax], [ymin,ymax] ]
self.psdict['nav'] = navigate(0,0,init=True)
# Analysis dict
self.adict['flg'] = 0 # Column density flag
# Main Driver
def on_key(self,event):
flg = -1
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
## DOUBLETS
        if event.key in ['C','M','O','8','B']: # Doublet selection
wave = set_doublet(self, event)
#print('wave = {:g},{:g}'.format(wave[0], wave[1]))
self.ax.plot( [wave[0],wave[0]], self.psdict['ymnx'], '--', color='red')
self.ax.plot( [wave[1],wave[1]], self.psdict['ymnx'], '--', color='red')
flg = 2 # Layer
## SMOOTH
if event.key == 'S':
self.spec = self.spec.box_smooth(2)
flg = 1
if event.key == 'U':
self.spec = self.orig_spec
flg = 1
## ANALYSIS: EW, AODM column density
if event.key in ['N', 'E', '$']:
# If column check for line list
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if (event.key in ['N','E']) & (self.llist['List'] == 'None'):
print('xspec: Choose a Line list first!')
try:
self.statusBar().showMessage('Choose a Line list first!')
except AttributeError:
pass
self.adict['flg'] = 0
return
flg = 1
if self.adict['flg'] == 0:
self.adict['wv_1'] = event.xdata # wavelength
self.adict['C_1'] = event.ydata # continuum
self.adict['flg'] = 1
else:
self.adict['wv_2'] = event.xdata # wavelength
self.adict['C_2'] = event.ydata # continuum
self.adict['flg'] = 2 # Ready to plot + print
# Sort em + make arrays
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
iwv = np.array(sorted([self.adict['wv_1'], self.adict['wv_2']])) * self.spec.wcs.unit
ic = np.array(sorted([self.adict['C_1'], self.adict['C_2']]))
# Calculate the continuum (linear fit)
param = np.polyfit(iwv, ic, 1)
cfunc = np.poly1d(param)
conti = cfunc(self.spec.dispersion)
if event.key == '$': # Simple stats
pix = self.spec.pix_minmax(iwv)[0]
mean = np.mean(self.spec.flux[pix])
median = np.median(self.spec.flux[pix])
stdv = np.std(self.spec.flux[pix]-conti[pix])
S2N = median / stdv
mssg = 'Mean={:g}, Median={:g}, S/N={:g}'.format(mean,median,S2N)
else:
# Find the spectral line (or request it!)
rng_wrest = iwv / (self.llist['z']+1)
gdl = np.where( (self.llist[self.llist['List']]['wrest']-rng_wrest[0]) *
(self.llist[self.llist['List']]['wrest']-rng_wrest[1]) < 0.)[0]
if len(gdl) == 1:
wrest = self.llist[self.llist['List']]['wrest'][gdl[0]]
else:
if len(gdl) == 0: # Search through them all
gdl = np.arange(len(self.llist[self.llist['List']]))
sel_widg = SelectLineWidget(self.llist[self.llist['List']][gdl])
sel_widg.exec_()
line = sel_widg.line
wrest = float(line.split('::')[1].lstrip())
# Generate the Spectral Line
from xastropy.spec.lines_utils import AbsLine
aline = AbsLine(wrest)
aline.analy['z'] = self.llist['z']
aline.spec = self.spec
# AODM
if event.key == 'N':
# Calculate the velocity limits and load-up
aline.analy['VLIM'] = const.c.to('km/s') * (
( iwv/(1+self.llist['z']) - wrest) / wrest )
# AODM
aline.aodm(conti=conti)
mssg = 'Using '+ aline.__repr__()
mssg = mssg + ' :: logN = {:g} +/- {:g}'.format(aline.attrib['logN'],
aline.attrib['sig_logN'])
elif event.key == 'E': #EW
aline.analy['WVMNX'] = iwv
aline.restew(conti=conti)
mssg = 'Using '+ aline.__repr__()
mssg = mssg + ' :: EW = {:g} +/- {:g}'.format(aline.attrib['EW'].to('mAA'),
aline.attrib['sigEW'].to('mAA'))
# Display values
try:
self.statusBar().showMessage(mssg)
except AttributeError:
pass
print(mssg)
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
## Velocity plot
if event.key == 'v':
flg = 0
from xastropy.xguis import spec_guis as xsgui
z=self.llist['z']
# Check for a match in existing list and use it if so
if len(self.abs_sys) > 0:
zabs = np.array([abs_sys.zabs for abs_sys in self.abs_sys])
mt = np.where( np.abs(zabs-z) < 1e-4)[0]
else:
mt = []
if len(mt) == 1:
ini_abs_sys = self.abs_sys[mt[0]]
outfil = ini_abs_sys.absid_file
self.vplt_flg = 0 # Old one
print('Using existing ID file {:s}'.format(outfil))
else:
ini_abs_sys = None
outfil = None
self.vplt_flg = 1 # New one
# Outfil
if outfil is None:
i0 = self.spec.filename.rfind('/')
i1 = self.spec.filename.rfind('.')
if i0 < 0:
path = './ID_LINES/'
else:
path = self.spec.filename[0:i0]+'/ID_LINES/'
outfil = path + self.spec.filename[i0+1:i1]+'_z'+'{:.4f}'.format(z)+'_id.fits'
xxu.files.ensure_dir(outfil)
self.outfil = outfil
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Launch
gui = xsgui.XVelPltGui(self.spec, z=z, outfil=outfil, llist=self.llist,
abs_sys=ini_abs_sys, norm=self.norm, sel_wv=self.xval)
gui.exec_()
if gui.flg_quit == 0: # Quit without saving (i.e. discarded)
self.vplt_flg = 0
else:
# Push to Abs_Sys
if len(mt) == 1:
self.abs_sys[mt[0]] = gui.abs_sys
else:
self.abs_sys.append(gui.abs_sys)
print('Adding new abs system')
# Redraw
flg=1
# Dummy keys
if event.key in ['shift', 'control']:
flg = 0
# Draw
if flg==1: # Default is not to redraw
self.on_draw()
elif flg==2: # Layer (no clear)
self.on_draw(replot=False)
elif flg==-1: # Layer (no clear)
try:
self.statusBar().showMessage('Not a valid key! {:s}'.format(event.key))
except AttributeError:
pass
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
print('Out of bounds')
return
if event.button == 1: # Draw line
self.xval = event.xdata
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
# ######
def on_draw(self, replot=True):
""" Redraws the spectrum
"""
#
if replot is True:
self.ax.clear()
self.ax.plot(self.spec.dispersion, self.spec.flux, 'k-',drawstyle='steps-mid')
self.ax.plot(self.spec.dispersion, self.spec.sig, 'r:')
#self.ax.plot(self.spec.dispersion, self.spec.flux, 'k-',drawstyle='steps-mid')
#self.ax.plot(self.spec.dispersion, self.spec.sig, 'r:')
self.ax.set_xlabel('Wavelength')
self.ax.set_ylabel('Flux')
# Spectral lines?
if self.llist['Plot'] is True:
ylbl = self.psdict['ymnx'][1]-0.2*(self.psdict['ymnx'][1]-self.psdict['ymnx'][0])
z = self.llist['z']
wvobs = np.array((1+z) * self.llist[self.llist['List']]['wrest'])
gdwv = np.where( (wvobs > self.psdict['xmnx'][0]) &
(wvobs < self.psdict['xmnx'][1]))[0]
for kk in range(len(gdwv)):
jj = gdwv[kk]
wrest = self.llist[self.llist['List']]['wrest'][jj]
lbl = self.llist[self.llist['List']]['name'][jj]
# Plot
self.ax.plot(wrest*np.array([z+1,z+1]), self.psdict['ymnx'], 'b--')
# Label
self.ax.text(wrest*(z+1), ylbl, lbl, color='blue', rotation=90., size='small')
# Abs Sys?
if not self.abs_sys is None:
ylbl = self.psdict['ymnx'][0]+0.2*(self.psdict['ymnx'][1]-self.psdict['ymnx'][0])
clrs = ['red', 'green', 'cyan', 'orange', 'gray', 'purple']*10
for abs_sys in self.abs_sys:
ii = self.abs_sys.index(abs_sys)
wrest = np.array(abs_sys.lines.keys())
wvobs = wrest * (abs_sys.zabs+1)
gdwv = np.where( ((wvobs+5) > self.psdict['xmnx'][0]) & # Buffer for region
((wvobs-5) < self.psdict['xmnx'][1]))[0]
for kk in range(len(gdwv)):
jj = gdwv[kk]
if abs_sys.lines[wrest[jj]].analy['FLG_ANLY'] == 0:
continue
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Paint spectrum red
wvlim = wvobs[jj]*(1 + abs_sys.lines[wrest[jj]].analy['VLIM']/3e5)
pix = np.where( (self.spec.dispersion > wvlim[0]) & (self.spec.dispersion < wvlim[1]))[0]
self.ax.plot(self.spec.dispersion[pix], self.spec.flux[pix], '-',drawstyle='steps-mid',
color=clrs[ii])
# Label
lbl = abs_sys.lines[wrest[jj]].analy['IONNM']+' z={:g}'.format(abs_sys.zabs)
self.ax.text(wvobs[jj], ylbl, lbl, color=clrs[ii], rotation=90., size='x-small')
# Analysis? EW, Column
if self.adict['flg'] == 1:
self.ax.plot(self.adict['wv_1'], self.adict['C_1'], 'go')
elif self.adict['flg'] == 2:
self.ax.plot([self.adict['wv_1'], self.adict['wv_2']],
[self.adict['C_1'], self.adict['C_2']], 'g--', marker='o')
self.adict['flg'] = 0
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
self.ax.set_ylim(self.psdict['ymnx'])
# Draw
self.canvas.draw()
# Notes on usage
def help_notes():
doublets = [ 'Doublets --------',
'C: CIV',
'M: MgII',
'O: OVI',
'8: NeVIII',
'B: Lyb/Lya'
]
        analysis = [ 'Analysis --------',
                     'N: Column density (AODM)',
                     'E: EW (boxcar)',
                     '$: stats on spectrum'
                     ]
        return doublets + analysis
# #####
class PlotLinesWidget(QtGui.QWidget):
''' Widget to set up spectral lines for plotting
13-Dec-2014 by JXP
'''
def __init__(self, parent=None, status=None, init_llist=None, init_z=None):
'''
'''
super(PlotLinesWidget, self).__init__(parent)
# Initialize
if not status is None:
self.statusBar = status
if init_z is None:
init_z = 0.
# Create a dialog window for redshift
z_label = QtGui.QLabel('z=')
self.zbox = QtGui.QLineEdit()
self.zbox.z_frmt = '{:.7f}'
self.zbox.setText(self.zbox.z_frmt.format(init_z))
self.zbox.setMinimumWidth(50)
self.connect(self.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
# Create the line list
self.lists = ['None', 'grb.lst', 'dla.lst', 'lls.lst',
'lyman.lst', 'gal_vac.lst', 'ne8.lst',
'lowz_ovi.lst', 'casbah.lst']
list_label = QtGui.QLabel('Line Lists:')
self.llist_widget = QtGui.QListWidget(self)
for ilist in self.lists:
self.llist_widget.addItem(ilist)
self.llist_widget.setCurrentRow(0)
self.llist_widget.currentItemChanged.connect(self.on_list_change)
self.llist_widget.setMaximumHeight(100)
# Input line list?
if init_llist is None:
self.llist = {} # Dict for the line lists
self.llist['Plot'] = False
self.llist['z'] = 0.
self.llist['List'] = 'None'
else: # Fill it all up and select
self.llist = init_llist
if not init_llist['List'] in self.lists:
self.lists.append(init_llist['List'])
self.llist_widget.addItem(init_llist['List'])
self.llist_widget.setCurrentRow(len(self.lists)-1)
else:
idx = self.lists.index(init_llist['List'])
self.llist_widget.setCurrentRow(idx)
try:
self.zbox.setText(self.zbox.z_frmt.format(init_llist['z']))
except KeyError:
pass
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(z_label)
vbox.addWidget(self.zbox)
vbox.addWidget(list_label)
vbox.addWidget(self.llist_widget)
self.setLayout(vbox)
self.setMaximumHeight(200)
def on_list_change(self,curr,prev):
llist = str(curr.text())
# Print
try:
self.statusBar().showMessage('You chose: {:s}'.format(llist))
except AttributeError:
print('You chose: {:s}'.format(curr.text()))
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.llist = set_llist(llist,in_dict=self.llist)
# Try to draw
if self.llist['Plot'] is True:
try:
self.spec_widg.on_draw()
except AttributeError:
return
def setz(self):
sstr = unicode(self.zbox.text())
try:
self.llist['z'] = float(sstr)
except ValueError:
try:
self.statusBar().showMessage('ERROR: z Input must be a float! Try again..')
except AttributeError:
print('ERROR: z Input must be a float! Try again..')
self.zbox.setText(self.zbox.z_frmt.format(self.llist['z']))
return
# Report
try:
self.statusBar().showMessage('z = {:g}'.format(self.llist['z']))
except AttributeError:
print('z = {:g}'.format(self.llist['z']))
# Try to draw
try:
self.spec_widg.on_draw()
except AttributeError:
return
# #####
class SelectLineWidget(QtGui.QDialog):
''' Widget to select a spectral line
inp: string or dict or Table
Input line list
15-Dec-2014 by JXP
'''
def __init__(self, inp, parent=None):
'''
'''
super(SelectLineWidget, self).__init__(parent)
# Line list Table
if isinstance(inp,Table):
lines = inp
else:
raise ValueError('SelectLineWidget: Wrong type of input')
self.resize(250, 800)
# Create the line list
line_label = QtGui.QLabel('Lines:')
self.lines_widget = QtGui.QListWidget(self)
self.lines_widget.addItem('None')
self.lines_widget.setCurrentRow(0)
#xdb.set_trace()
# Loop on lines (could put a preferred list first)
nlin = len(lines['wrest'])
for ii in range(nlin):
self.lines_widget.addItem('{:s} :: {:.4f}'.format(lines['name'][ii],
lines['wrest'][ii]))
self.lines_widget.currentItemChanged.connect(self.on_list_change)
#self.scrollArea = QtGui.QScrollArea()
# Quit
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(self.close)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(line_label)
vbox.addWidget(self.lines_widget)
vbox.addWidget(qbtn)
self.setLayout(vbox)
def on_list_change(self,curr,prev):
self.line = str(curr.text())
# Print
print('You chose: {:s}'.format(curr.text()))
# #####
class SelectedLinesWidget(QtGui.QWidget):
''' Widget to show and enable lines to be selected
inp: Table or (future: string or dict)
Input line list
24-Dec-2014 by JXP
'''
def __init__(self, inp, parent=None, init_select=None, plot_widget=None):
'''
'''
super(SelectedLinesWidget, self).__init__(parent)
# Line list Table
if isinstance(inp,Table):
self.lines = inp
else:
raise ValueError('SelectLineWidget: Wrong type of input')
self.plot_widget = plot_widget
# Create the line list
line_label = QtGui.QLabel('Lines:')
self.lines_widget = QtGui.QListWidget(self)
self.lines_widget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
# Initialize list
self.item_flg = 0
self.init_list()
# Initial selection
if init_select is None:
self.selected = [0]
else:
self.selected = init_select
for iselect in self.selected:
self.lines_widget.item(iselect).setSelected(True)
self.lines_widget.scrollToItem( self.lines_widget.item( self.selected[0] ) )
# Events
#self.lines_widget.itemClicked.connect(self.on_list_change)
self.lines_widget.itemSelectionChanged.connect(self.on_item_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(line_label)
vbox.addWidget(self.lines_widget)
self.setLayout(vbox)
def init_list(self):
nlin = len(self.lines['wrest'])
for ii in range(nlin):
self.lines_widget.addItem('{:s} :: {:.3f}'.format(self.lines['name'][ii],
self.lines['wrest'][ii]))
def on_item_change(self): #,item):
# For big changes
if self.item_flg == 1:
return
all_items = [self.lines_widget.item(ii) for ii in range(self.lines_widget.count())]
sel_items = self.lines_widget.selectedItems()
self.selected = [all_items.index(isel) for isel in sel_items]
self.selected.sort()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Update llist
try:
self.plot_widget.llist['show_line'] = self.selected
except AttributeError:
return
else:
self.plot_widget.on_draw()
def on_list_change(self,lines):
# Clear
self.item_flg = 1
self.lines_widget.clear()
# Initialize
self.lines = lines
self.init_list()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Set selected
for iselect in self.selected:
self.lines_widget.item(iselect).setSelected(True)
self.lines_widget.scrollToItem( self.lines_widget.item( self.selected[0] ) )
self.item_flg = 0
# #####
class AbsSysWidget(QtGui.QWidget):
''' Widget to organize AbsSys along a given sightline
Parameters:
-----------
abssys_list: List
String list of abssys files
16-Dec-2014 by JXP
'''
def __init__(self, abssys_list, parent=None):
'''
'''
super(AbsSysWidget, self).__init__(parent)
#if not status is None:
# self.statusBar = status
self.abssys_list = abssys_list
# Create the line list
list_label = QtGui.QLabel('Abs Systems:')
self.abslist_widget = QtGui.QListWidget(self)
self.abslist_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.abslist_widget.addItem('None')
#self.abslist_widget.addItem('Test')
# Lists
self.abs_sys = []
self.items = []
self.all_items = []
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLS_System.from_absid_fil(abssys_fil))
self.add_item(abssys_fil)
self.abslist_widget.setCurrentRow(0)
self.abslist_widget.itemSelectionChanged.connect(self.on_list_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(list_label)
# Buttons
buttons = QtGui.QWidget()
self.refine_button = QtGui.QPushButton('Refine', self)
#self.refine_button.clicked.connect(self.refine) # CONNECTS TO A PARENT
reload_btn = QtGui.QPushButton('Reload', self)
reload_btn.clicked.connect(self.reload)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.refine_button)
hbox1.addWidget(reload_btn)
buttons.setLayout(hbox1)
vbox.addWidget(buttons)
vbox.addWidget(self.abslist_widget)
self.setLayout(vbox)
# ##
def on_list_change(self):
items = self.abslist_widget.selectedItems()
# Empty the list
#self.abs_sys = []
if len(self.abs_sys) > 0:
for ii in range(len(self.abs_sys)-1,-1,-1):
self.abs_sys.pop(ii)
# Load up abs_sys (as need be)
new_items = []
for item in items:
txt = item.text()
# Dummy
if txt == 'None':
continue
print('Including {:s} in the list'.format(txt))
# Using LLS for now. Might change to generic
new_items.append(txt)
ii = self.all_items.index(txt)
self.abs_sys.append(self.all_abssys[ii])
# Pass back
self.items = new_items
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
def add_fil(self,abssys_fil):
self.abssys_list.append( abssys_fil )
self.add_item(abssys_fil)
def add_item(self,abssys_fil):
ipos0 = abssys_fil.rfind('/') + 1
ipos1 = abssys_fil.rfind('.fits')
self.all_items.append( abssys_fil[ipos0:ipos1] )
self.abslist_widget.addItem(abssys_fil[ipos0:ipos1] )
def reload(self):
print('AbsSysWidget: Reloading systems..')
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLS_System.from_absid_fil(abssys_fil))
#self.add_item(abssys_fil)
self.on_list_change()
# ######################
class VelPlotWidget(QtGui.QWidget):
''' Widget for a velocity plot with interaction.
19-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.], abs_sys=None):
'''
spec = Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
'''
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
super(VelPlotWidget, self).__init__(parent)
# Initialize
spec, spec_fil = read_spec(ispec)
self.spec = spec
self.spec_fil = spec_fil
self.z = z
self.vmnx = vmnx
self.norm = norm
# Abs_System
self.abs_sys = abs_sys
if self.abs_sys is None:
self.abs_sys = xiaa.Generic_System(None)
self.abs_sys.zabs = self.z
else:
self.z = self.abs_sys.zabs
# Line list
if llist is None:
try:
lwrest = self.abs_sys.lines.keys()
except AttributeError:
lwrest = None
if not lwrest is None:
llist = set_llist(lwrest)
self.psdict = {} # Dict for spectra plotting
self.psdict['xmnx'] = self.vmnx
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = navigate(0,0,init=True)
# Status Bar?
#if not status is None:
# self.statusBar = status
# Line List
if llist is None:
self.llist = set_llist('lls.lst')
else:
self.llist = llist
self.llist['z'] = self.z
# Indexing for line plotting
self.idx_line = 0
self.init_lines()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Sub_plots
self.sub_xy = [3,4]
self.fig.subplots_adjust(hspace=0.0, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Load them up for display
def init_lines(self):
wvmin = np.min(self.spec.dispersion)
wvmax = np.max(self.spec.dispersion)
#
wrest = u.Quantity(self.llist[self.llist['List']]['wrest'])
wvobs = (1+self.z) * wrest
gdlin = np.where( (wvobs > wvmin) & (wvobs < wvmax) )[0]
self.llist['show_line'] = gdlin
existing_lines = self.abs_sys.lines.keys()
# Update/generate lines
for idx in gdlin:
# Generate?
kwrest = wrest[idx].value
if not kwrest in existing_lines:
self.abs_sys.lines[kwrest] = xspec.analysis.Spectral_Line(kwrest)
print('VelPlot: Generating line {:g}'.format(kwrest))
self.abs_sys.lines[kwrest].analy['VLIM'] = np.array([self.vmnx[0]/2.,
self.vmnx[1]/2.])
self.abs_sys.lines[kwrest].analy['FLG_ANLY'] = 2 # Init to ok
# Spec file
if not self.spec_fil is None:
self.abs_sys.lines[kwrest].analy['DATFIL'] = self.spec_fil
# Key stroke
def on_key(self,event):
# Init
rescale = True
fig_clear = False
wrest = None
flg = 0
sv_idx = self.idx_line
## Change rows/columns
if event.key == 'k':
self.sub_xy[0] = max(0, self.sub_xy[0]-1)
if event.key == 'K':
self.sub_xy[0] = self.sub_xy[0]+1
if event.key == 'c':
self.sub_xy[1] = max(0, self.sub_xy[1]-1)
if event.key == 'C':
self.sub_xy[1] = max(0, self.sub_xy[1]+1)
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
if event.key == '-':
self.idx_line = max(0, self.idx_line-self.sub_xy[0]*self.sub_xy[1]) # Min=0
if self.idx_line == sv_idx:
print('Edge of list')
if event.key == '=':
self.idx_line = min(len(self.llist['show_line'])-self.sub_xy[0]*self.sub_xy[1],
self.idx_line + self.sub_xy[0]*self.sub_xy[1])
if self.idx_line == sv_idx:
print('Edge of list')
## Reset z
if event.key == 'z':
from astropy.relativity import velocities
newz = velocities.z_from_v(self.z, event.xdata)
self.z = newz
self.abs_sys.zabs = newz
# Drawing
self.psdict['xmnx'] = self.vmnx
# Single line command
if event.key in ['1','2','B','U','L','N','V','A', 'x', 'X']:
try:
wrest = event.inaxes.get_gid()
except AttributeError:
return
else:
kwrest = wrest.value
## Velocity limits
if event.key == '1':
self.abs_sys.lines[kwrest].analy['VLIM'][0] = event.xdata
if event.key == '2':
self.abs_sys.lines[kwrest].analy['VLIM'][1] = event.xdata
if event.key == '!':
for key in self.abs_sys.lines.keys():
try:
self.abs_sys.lines[key].analy['VLIM'][0] = event.xdata
except KeyError:
print('Not setting VLIM for {:g}'.format(key))
if event.key == '@':
for key in self.abs_sys.lines.keys():
try:
self.abs_sys.lines[key].analy['VLIM'][1] = event.xdata
except KeyError:
print('Not setting VLIM for {:g}'.format(key))
## Line type
if event.key == 'A': # Add to lines
if not kwrest in self.abs_sys.lines.keys():
self.abs_sys.lines[kwrest] = xspec.analysis.Spectral_Line(wrest)
print('VelPlot: Generating line {:g}'.format(kwrest))
self.abs_sys.lines[kwrest].analy['VLIM'] = np.array([self.vmnx[0]/2.,
self.vmnx[1]/2.])
self.abs_sys.lines[kwrest].analy['FLG_ANLY'] = 2 # Init to ok
self.abs_sys.lines[kwrest].analy['DATFIL'] = self.spec_fil
if event.key == 'x': # Remove line
if kwrest in self.abs_sys.lines.keys():
self.abs_sys.lines.pop(kwrest)
                    print('VelPlot: Removed line {:g}'.format(kwrest))
if event.key == 'X': # Remove all lines (might add warning widget)
# Double check
gui = xguiu.WarningWidg('About to remove all lines. \n Continue??')
gui.exec_()
if gui.ans is False:
return
#
                for kwrest in list(self.abs_sys.lines.keys()):
                    self.abs_sys.lines.pop(kwrest)
                    print('VelPlot: Removed line {:g}'.format(kwrest))
if event.key == 'B': # Toggle blend
try:
feye = self.abs_sys.lines[kwrest].analy['FLG_EYE']
except KeyError:
feye = 0
feye = (feye + 1) % 2
self.abs_sys.lines[kwrest].analy['FLG_EYE'] = feye
if event.key == 'N': # Toggle NG
try:
fanly = self.abs_sys.lines[kwrest].analy['FLG_ANLY']
except KeyError:
fanly = 2
if fanly == 0:
fanly = 2 # Not using 1 anymore..
else:
fanly = 0
self.abs_sys.lines[kwrest].analy['FLG_ANLY'] = fanly
if event.key == 'V': # Normal
self.abs_sys.lines[kwrest].analy['FLG_LIMIT'] = 1
if event.key == 'L': # Lower limit
self.abs_sys.lines[kwrest].analy['FLG_LIMIT'] = 2
if event.key == 'U': # Upper limit
self.abs_sys.lines[kwrest].analy['FLG_LIMIT'] = 3
# AODM plot
if event.key == ':': #
# Grab good lines
from xastropy.xguis import spec_guis as xsgui
gdl = []
for iwr in self.abs_sys.lines.keys():
if self.abs_sys.lines[iwr].analy['FLG_ANLY'] > 0:
gdl.append(iwr)
# Launch AODM
gui = xsgui.XAODMGui(self.spec, self.z, gdl, vmnx=self.vmnx, norm=self.norm)
gui.exec_()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if not wrest is None: # Single window
flg = 3
if event.key in ['c','C','k','K','W','!', '@', '=', '-', 'X', 'z','R']: # Redraw all
flg = 1
if event.key in ['Y']:
rescale = False
if event.key in ['k','c','C','K', 'R']:
fig_clear = True
if flg==1: # Default is not to redraw
self.on_draw(rescale=rescale, fig_clear=fig_clear)
elif flg==2: # Layer (no clear)
self.on_draw(replot=False, rescale=rescale)
elif flg==3: # Layer (no clear)
self.on_draw(in_wrest=wrest, rescale=rescale)
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, replot=True, in_wrest=None, rescale=True, fig_clear=False):
""" Redraws the figure
"""
#
if replot is True:
if fig_clear:
self.fig.clf()
# Loop on windows
all_idx = self.llist['show_line']
nplt = self.sub_xy[0]*self.sub_xy[1]
if len(all_idx) <= nplt:
self.idx_line = 0
subp = np.arange(nplt) + 1
subp_idx = np.hstack(subp.reshape(self.sub_xy[0],self.sub_xy[1]).T)
for jj in range(min(nplt, len(all_idx))):
try:
idx = all_idx[jj+self.idx_line]
except IndexError:
continue # Likely too few lines
# Grab line
#wvobs = np.array((1+self.z) * self.llist[self.llist['List']]['wrest'][idx])
wrest = (self.llist[self.llist['List']]['wrest'][idx] *
self.llist[self.llist['List']]['wrest'].unit)
kwrest = wrest.value # For the Dict
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Single window?
if not in_wrest is None:
if np.abs(wrest-in_wrest) > (1e-3*u.AA):
continue
# Generate plot
self.ax = self.fig.add_subplot(self.sub_xy[0],self.sub_xy[1], subp_idx[jj])
self.ax.clear()
#print('Plotting {:g}, {:d}'.format(wrest,subp_idx[jj]))
# Zero line
self.ax.plot( [0., 0.], [-1e9, 1e9], ':', color='gray')
# Velocity
wvobs = (1+self.z) * wrest
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s').value
# Plot
self.ax.plot(velo, self.spec.flux, 'k-',drawstyle='steps-mid')
# GID for referencing
self.ax.set_gid(wrest)
# Labels
#if jj >= (self.sub_xy[0]-1)*(self.sub_xy[1]):
if ((jj+1) % self.sub_xy[0]) == 0:
self.ax.set_xlabel('Relative Velocity (km/s)')
else:
self.ax.get_xaxis().set_ticks([])
#if ((jj+1) // 2 == 0) & (jj < self.sub_xy[0]):
# self.ax.set_ylabel('Relative Flux')
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
lbl = self.llist[self.llist['List']]['name'][idx]
self.ax.text(0.1, 0.05, lbl, color='blue', transform=self.ax.transAxes,
size='x-small', ha='left')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
# Rescale?
if (rescale is True) & (self.norm is False):
gdp = np.where( (velo > self.psdict['xmnx'][0]) &
(velo < self.psdict['xmnx'][1]))[0]
if len(gdp) > 5:
per = xstats.basic.perc(self.spec.flux[gdp])
self.ax.set_ylim((0., 1.1*per[1]))
else:
self.ax.set_ylim(self.psdict['ymnx'])
else:
self.ax.set_ylim(self.psdict['ymnx'])
# Fonts
xputils.set_fontsize(self.ax,6.)
# Abs_Sys: Color the lines
if not self.abs_sys is None:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
try:
vlim = self.abs_sys.lines[kwrest].analy['VLIM']
except KeyError:
continue
# Color coding
clr = 'black'
try: # .clm style
flag = self.abs_sys.lines[kwrest].analy['FLAGS'][0]
except KeyError:
flag = None
else:
if flag <= 1: # Standard detection
clr = 'green'
elif flag in [2,3]:
clr = 'blue'
elif flag in [4,5]:
clr = 'purple'
# ABS ID
try: # NG?
flagA = self.abs_sys.lines[kwrest].analy['FLG_ANLY']
except KeyError:
flagA = None
else:
if (flagA>0) & (clr == 'black'):
clr = 'green'
try: # Limit?
flagL = self.abs_sys.lines[kwrest].analy['FLG_LIMIT']
except KeyError:
flagL = None
else:
if flagL == 2:
clr = 'blue'
if flagL == 3:
clr = 'purple'
try: # Blends?
flagE = self.abs_sys.lines[kwrest].analy['FLG_EYE']
except KeyError:
flagE = None
else:
if flagE == 1:
clr = 'orange'
if flagA == 0:
clr = 'red'
pix = np.where( (velo > vlim[0]) & (velo < vlim[1]))[0]
self.ax.plot(velo[pix], self.spec.flux[pix], '-',
drawstyle='steps-mid', color=clr)
# Draw
self.canvas.draw()
# ######################
class AODMWidget(QtGui.QWidget):
''' Widget for comparing tau_AODM profiles
19-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, parent=None, vmnx=[-300., 300.],
norm=True):
'''
spec = Spectrum1D
'''
super(AODMWidget, self).__init__(parent)
# Initialize
self.spec = spec
self.norm = norm
self.z = z
self.vmnx = vmnx
self.wrest = wrest
self.lines = []
for iwrest in self.wrest:
self.lines.append(xspec.analysis.Spectral_Line(iwrest))
self.psdict = {} # Dict for spectra plotting
self.psdict['xmnx'] = self.vmnx
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = navigate(0,0,init=True)
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Key stroke
def on_key(self,event):
# Init
rescale = True
flg = 0
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
if event.key in ['b','t','W','Z','Y','l','r']:
rescale = False
self.on_draw(rescale=rescale)
# Click of main mouse button
def on_click(self,event):
return # DO NOTHING FOR NOW
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw()
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, rescale=True):
""" Redraws the figure
"""
#
self.ax = self.fig.add_subplot(1,1,1)
self.ax.clear()
ymx = 0.
for ii,iwrest in enumerate(self.wrest):
# Velocity
wvobs = (1+self.z) * iwrest
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s').value
gdp = np.where((velo > self.psdict['xmnx'][0]) &
(velo < self.psdict['xmnx'][1]))[0]
# Normalize?
if self.norm is False:
per = xstats.basic.perc(self.spec.flux[gdp])
fsplice = per[1] / self.spec.flux[gdp]
else:
fsplice = 1./ self.spec.flux[gdp]
# AODM
cst = (10.**14.5761)/(self.lines[ii].atomic['fval']*iwrest.value)
Naodm = np.log(fsplice)*cst
ymx = max(ymx,np.max(Naodm))
# Plot
line, = self.ax.plot(velo[gdp], Naodm, '-', drawstyle='steps-mid')
# Labels
lbl = '{:g}'.format(iwrest)
clr = plt.getp(line, 'color')
self.ax.text(0.1, 1.-(0.05+0.05*ii), lbl, color=clr,
transform=self.ax.transAxes, size='small', ha='left')
self.ax.set_xlabel('Relative Velocity (km/s)')
self.ax.set_ylabel('N(AODM)')
# Zero line
self.ax.plot( [0., 0.], [-1e29, 1e29], ':', color='gray')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
if rescale:
self.psdict['ymnx'] = [0.05*ymx, ymx*1.1]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.ax.set_ylim(self.psdict['ymnx'])
# Fonts
#xputils.set_fontsize(self.ax,6.)
# Draw
self.canvas.draw()
# ######
# Plot Doublet
def set_doublet(iself,event):
''' Set z and plot doublet
'''
wv_dict = {'C': (1548.195, 1550.770, 'CIV'), 'M': (2796.352, 2803.531, 'MgII'),
'O': (1031.9261, 1037.6167, 'OVI'), '8': (770.409, 780.324, 'NeVIII'),
'B': (1025.4433, 1215.6701, 'Lyba')}
wrest = wv_dict[event.key]
# Set z
iself.zabs = event.xdata/wrest[0] - 1.
try:
iself.statusBar().showMessage('z = {:g} for {:s}'.format(iself.zabs, wrest[2]))
except AttributeError:
print('z = {:g} for {:s}'.format(iself.zabs, wrest[2]))
return np.array(wrest[0:2])*(1.+iself.zabs)
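# Hedged numeric check of set_doublet: pressing 'C' (CIV 1548.195) at
# xdata = 3096.390 Ang gives zabs = 3096.390/1548.195 - 1. = 1.0, so the
# doublet is drawn at 3096.390 and 3101.540 Ang. The helper below factors
# out that last step; it is illustrative only and unused elsewhere.
def _doublet_positions(zabs, wrest_pair):
    ''' Observed-frame wavelengths of a doublet pair at redshift zabs '''
    return np.array(wrest_pair) * (1. + zabs)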
# ######
# Navigate
def navigate(psdict,event,init=False):
''' Method to Navigate spectrum
init: (False) Initialize
Just pass back valid key strokes
'''
    # Initialize
if init is True:
return ['l','r','b','t','i','I', 'o','O', '[',']','W','Z', 'Y', '{', '}']
#
if (not isinstance(event.xdata,float)) or (not isinstance(event.ydata,float)):
print('Navigate: You entered the {:s} key out of bounds'.format(event.key))
return 0
if event.key == 'l': # Set left
psdict['xmnx'][0] = event.xdata
elif event.key == 'r': # Set Right
psdict['xmnx'][1] = event.xdata
elif event.key == 'b': # Set Bottom
psdict['ymnx'][0] = event.ydata
elif event.key == 't': # Set Top
psdict['ymnx'][1] = event.ydata
elif event.key == 'i': # Zoom in (and center)
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/4.
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
elif event.key == 'I': # Zoom in (and center)
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/16.
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
elif event.key == 'o': # Zoom in (and center)
deltx = psdict['xmnx'][1]-psdict['xmnx'][0]
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
elif event.key == 'O': # Zoom in (and center)
deltx = psdict['xmnx'][1]-psdict['xmnx'][0]
psdict['xmnx'] = [event.xdata-2*deltx, event.xdata+2*deltx]
elif event.key == 'Y': # Zoom in (and center)
delty = psdict['ymnx'][1]-psdict['ymnx'][0]
psdict['ymnx'] = [event.ydata-delty, event.ydata+delty]
elif event.key in ['[',']','{','}']: # Pan
center = (psdict['xmnx'][1]+psdict['xmnx'][0])/2.
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/2.
if event.key == '[':
new_center = center - deltx
elif event.key == ']':
new_center = center + deltx
elif event.key == '{':
new_center = center - 4*deltx
elif event.key == '}':
new_center = center + 4*deltx
psdict['xmnx'] = [new_center-deltx, new_center+deltx]
elif event.key == 'W': # Reset the Window
psdict['xmnx'] = psdict['sv_xy'][0]
psdict['ymnx'] = psdict['sv_xy'][1]
elif event.key == 'Z': # Zero
psdict['ymnx'][0] = 0.
else:
if not (event.key in ['shift']):
rstr = 'Key {:s} not supported.'.format(event.key)
print(rstr)
return 0
return 1
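# Minimal sketch of the calling pattern the widgets above use; ``event`` is
# a matplotlib key_press_event and ``psdict`` comes from a widget's
# init_spec. Illustrative only, not called in this module.
def _example_handle_key(psdict, event):
    if event.key in navigate(0, 0, init=True): # valid navigation keys
        if navigate(psdict, event) == 1: # limits were updated
            return True # caller would redraw, e.g. self.on_draw()
    return False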
# ######
#
def set_llist(llist,in_dict=None):
''' Method to set a line list dict for the Widgets
'''
if in_dict is None:
in_dict = {}
if isinstance(llist,str) or isinstance(llist,unicode): # Set line list from a file
in_dict['List'] = llist
if llist == 'None':
in_dict['Plot'] = False
else:
in_dict['Plot'] = True
# Load?
if not (llist in in_dict):
#line_file = xa_path+'/data/spec_lines/'+llist
llist_cls = xspec.abs_line.Abs_Line_List(llist)
in_dict[llist] = llist_cls.data
elif isinstance(llist,list): # Set from a list of wrest
from astropy.table import Column
in_dict['List'] = 'input.lst'
in_dict['Plot'] = True
# Fill
llist.sort()
tmp_dict = {}
# Parse from grb.lst
line_file = xa_path+'/data/spec_lines/grb.lst'
llist_cls = xspec.abs_line.Abs_Line_List(line_file)
adict = llist_cls.data
# Fill
names = []
fval = []
for wrest in llist:
mt = np.where(np.abs(wrest-adict['wrest']) < 1e-3)[0]
if len(mt) != 1:
raise ValueError('Problem!')
names.append(adict['name'][mt][0])
fval.append(adict['fval'][mt][0])
# Set
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Generate a Table
col0 = Column(np.array(llist), name='wrest', unit=u.AA) # Assumed Angstroms
col1 = Column(np.array(names), name='name')
col2 = Column(np.array(fval), name='fval')
in_dict['input.lst'] = Table( (col0,col1,col2) )
# Return
return in_dict
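# Usage sketch for set_llist ('grb.lst' is one of the list files referenced
# above; the example wavelengths are assumptions):
#   llist = set_llist('grb.lst')             # from a line-list file
#   llist = set_llist([1215.6701, 1548.195]) # from rest wavelengths (Ang)
#   llist['Plot'], llist['List']             # -> True, and the list name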
# Read spectrum, pass back it and spec_file name
def read_spec(ispec, second_file=None):
#
if isinstance(ispec,str) or isinstance(ispec,unicode):
spec_fil = ispec
spec = xspec.readwrite.readspec(spec_fil)
# Second file?
if not second_file is None:
spec2 = xspec.readwrite.readspec(second_file)
# Scale for convenience of plotting
xper1 = xstats.basic.perc(spec.flux, per=0.9)
xper2 = xstats.basic.perc(spec2.flux, per=0.9)
scl = xper1[1]/xper2[1]
# Stitch together
wave3 = np.append(spec.dispersion, spec2.dispersion)
flux3 = np.append(spec.flux, spec2.flux*scl)
sig3 = np.append(spec.sig, spec2.sig*scl)
#xdb.set_trace()
spec3 = Spectrum1D.from_array(wave3, flux3, uncertainty=StdDevUncertainty(sig3))
# Overwrite
spec = spec3
spec.filename = spec_fil
else:
spec = ispec # Assuming Spectrum1D
spec_fil = spec.filename # Grab from Spectrum1D
# Return
return spec, spec_fil
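# Usage sketch (hypothetical file names): read a single spectrum,
#   spec, spec_fil = read_spec('PH957_f.fits')
# or splice in a second, flux-scaled spectrum,
#   spec, spec_fil = read_spec('blue.fits', second_file='red.fits')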
# ################
# TESTING
if __name__ == "__main__":
from xastropy import spec as xspec
flg_fig = 0
flg_fig += 2**0 # ExamineSpecWidget
#flg_fig += 2**1 # PlotLinesWidget
#flg_fig += 2**2 # SelectLineWidget
#flg_fig += 2**3 # AbsSysWidget
#flg_fig += 2**4 # VelPltWidget
#flg_fig += 2**5 # SelectedLinesWidget
#flg_fig += 2**6 # AODMWidget
# ExamineSpec
if (flg_fig % 2) == 1:
app = QtGui.QApplication(sys.argv)
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = xspec.readwrite.readspec(spec_fil)
app.setApplicationName('XSpec')
main = ExamineSpecWidget(spec)
main.show()
sys.exit(app.exec_())
# PltLineWidget
if (flg_fig % 2**2) >= 2**1:
app = QtGui.QApplication(sys.argv)
app.setApplicationName('PltLine')
main = PlotLinesWidget()
main.show()
sys.exit(app.exec_())
# SelectLineWidget
if (flg_fig % 2**3) >= 2**2:
line_file = xa_path+'/data/spec_lines/grb.lst'
llist_cls = xspec.abs_line.Abs_Line_List(line_file)
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectLine')
main = SelectLineWidget(llist_cls.data)
main.show()
app.exec_()
print(main.line)
sys.exit()
# AbsSys Widget
if (flg_fig % 2**4) >= 2**3:
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2319-1040_z2.675_id.fits'
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AbsSys')
main = AbsSysWidget([abs_fil,abs_fil2])
main.show()
sys.exit(app.exec_())
# VelPlt Widget
if (flg_fig % 2**5) >= 2**4:
specf = 1
if specf == 0: # PH957 DLA
# Spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = xspec.readwrite.readspec(spec_fil)
# Abs_sys
abs_sys = xiaa.Generic_System(None)
abs_sys.clm_fil = '/Users/xavier/DLA/Abund/PH957.z2309.clm'
abs_sys.get_ions(skip_ions=True, fill_lines=True)
abs_sys.zabs = abs_sys.clm_analy.zsys
elif specf == 1: # UM184 LLS
# Spectrum
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = xspec.readwrite.readspec(spec_fil)
# Abs_sys
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/UM184_z2.930_id.fits'
abs_sys = xiaa.Generic_System(None)
abs_sys.parse_absid_file(abs_fil)
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('VelPlot')
main = VelPlotWidget(spec, abs_sys=abs_sys)
main.show()
sys.exit(app.exec_())
# SelectedLines Widget
if (flg_fig % 2**6) >= 2**5:
llist = set_llist('grb.lst')
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectedLines')
main = SelectedLinesWidget(llist['grb.lst'])
main.show()
sys.exit(app.exec_())
# AODM Widget
if (flg_fig % 2**7) >= 2**6:
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = xspec.readwrite.readspec(spec_fil)
z=2.96916
lines = np.array([1548.195, 1550.770]) * u.AA
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = AODMWidget(spec, z, lines)
main.show()
sys.exit(app.exec_())
| bsd-3-clause |
ste616/atca | atsenscalc/code/atsenscalc_routines.py | 1 | 41093 | ######################################################################
# The ATCA Sensitivity Calculator
# Calculation subroutines.
# Copyright 2015 Jamie Stevens, CSIRO
#
# This file is part of the ATCA Sensitivity Calculator.
#
# The ATCA Sensitivity Calculator is free software: you can
# redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# The ATCA Sensitivity Calculator is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with the ATCA Sensitivity Calculator.
# If not, see <http://www.gnu.org/licenses/>.
import os
import math
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import refract as refract
# Define some global parameters.
frequencyBands = {
# Low and high frequencies for each of the ATCA receivers, and the file to
# read to get Tsys information.
'16cm': { 'low': 1730, 'high': 2999,
'tsys': "systemps/ca02_21cm_x_polarisation.avg" },
'4cm': { 'low': 4928, 'high': 10928,
'tsys': "systemps/ca02_4cm_y_polarisation.avg" },
'15mm': { 'low': 16000, 'high': 25000,
'tsys': "systemps/12mm_recvtemps.avg" },
'7mm': { 'low': 30000, 'high': 50000,
'tsys': "systemps/ca02_7mm.avg" },
'3mm': { 'low': 83857, 'high': 104785,
'tsys': "systemps/nominal_3mm.avg" }
}
sideBands = {
# Which sideband is used per frequency range.
'USB': [ [ 4928, 10928 ], [ 41000, 50000 ], [ 97800, 104785 ] ],
'LSB': [ [ 1730, 2999 ], [ 16000, 25000 ], [ 30000, 40999 ],
[ 83857, 97799 ] ]
}
continuumBandwidth = 2049.0 # MHz bandwidth of the continuum bands.
nZoomChannels = 2048 # the number of channels each zoom has
antennaDiameter = 22.0 # metres
speedoflight = 299792458.0 # Speed of light, m/s
boltzmann = 1.3806488e-23 # J / K
latitudeATCA = -30.31288472 # degrees, latitude of the ATCA.
longitudeATCA = 149.5501388 # degrees, longitude of the ATCA.
eastAngle = longitudeATCA - 90.0
cangle = math.radians(eastAngle)
cosl = math.cos(math.radians(latitudeATCA))
sinl = math.sin(math.radians(latitudeATCA))
channelFlagging = {
# The channels always flagged in the continuum band.
'continuum': { 'CFB1M': [ 513, 1025, 1537 ],
'CFB64M': [ 9, 17, 25 ] },
# The sampling clock birdies.
'birdies': { 'CFB1M': [ 129, 157, 257, 641, 769, 1153, 1177, 1281, 1409, 1793, 1921 ] }
}
frequencyFlagging = {
# Known RFI ranges.
'rfi': [ [ 1059.0, 1075.0 ], [ 1103.0, 1117.0 ], [ 1145.0, 1159.0 ], [ 1165.0, 1191.0 ],
[ 1217.0, 1239.0 ], [ 1240.0, 1252.0 ], [ 1380.0, 1382.0 ], [ 1428.0, 1432.0 ],
[ 1436.0, 1450.0 ], [ 1456.0, 1460.0 ], [ 1493.0, 1495.0 ], [ 1499.0, 1511.0 ],
[ 1525.0, 1628.0 ], [ 2489.0, 2496.0 ], [ 2879.0, 2881.0 ], [ 5622.0, 5628.0 ],
[ 5930.0, 5960.0 ], [ 6440.0, 6480.0 ], [ 7747.0, 7777.0 ], [ 7866.0, 7896.0 ],
[ 8058.0, 8088.0 ], [ 8177.0, 8207.0 ] ]
}
# Some conversion factors.
mhzToHz = 1.0e6 # Convert MHz to Hz
degreesToArcmin = 60.0 # Convert degrees to arcminutes
degreesToArcsec = 3600.0 # Convert degrees to arcseconds
jyToSI = 1e-26 # Convert Jy to W.m^-2.s^-1
mToKm = 1e-3 # Convert m to km
# Our error handling class.
class CalcError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def frequencyOverlap(inf1, inf2, ing1, ing2):
# Given two frequency ranges f1 - f2, g1 - g2, returns the amount
# of frequency overlap.
overlap = 0.0
# Check for correct frequency order.
f1 = inf1
f2 = inf2
g1 = ing1
g2 = ing2
if (f2 < f1):
f1 = inf2
f2 = inf1
if (g2 < g1):
g1 = ing2
g2 = ing1
# Assess overlap.
if ((g1 >= f1) and (g1 <= f2)):
a1 = g2 - g1
a2 = f2 - g1
overlap += min(a1, a2)
elif ((g2 >= f1) and (g2 <= f2)):
a1 = g2 - g1
a2 = g2 - f1
overlap += min(a1, a2)
elif ((f1 >= g1) and (f1 <= g2)):
a1 = f2 - f1
a2 = g2 - f1
overlap += min(a1, a2)
elif ((f2 >= g1) and (f2 <= g2)):
a1 = f2 - f1
a2 = f2 - g1
overlap += min(a1, a2)
return overlap
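# Example: frequencyOverlap(1000.0, 2000.0, 1500.0, 2500.0) returns 500.0,
# and the ordering within each pair does not matter, so
# frequencyOverlap(2000.0, 1000.0, 2500.0, 1500.0) also returns 500.0.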
def frequencyBand(freq):
# Which band does the frequency belong to?
for b in frequencyBands:
if (freq >= frequencyBands[b]['low'] and
freq <= frequencyBands[b]['high']):
return b
return None
def channelResolution(corrMode):
# Return the width of each type of channel for a given correlator mode.
if (corrMode == 'CFB1M'):
return { 'continuum': 1.0, 'zoom': (1.0 / 2048.0) }
elif (corrMode == 'CFB64M'):
return { 'continuum': 64.0, 'zoom': (64.0 / 2048.0) }
else:
return None
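# Example: channelResolution('CFB1M') returns
#   { 'continuum': 1.0, 'zoom': (1.0 / 2048.0) }
# i.e. 1 MHz continuum channels and ~0.49 kHz zoom channels.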
def makeTemplate(centreFreq, bandwidth, channelWidth):
# Make a blank spectrum that covers the specified frequency range with
# the correct channel resolution.
c = []
v = []
f = []
n = []
x = []
p = []
if (type(bandwidth) is float):
lowFreq = centreFreq - (bandwidth - channelWidth) / 2
highFreq = centreFreq + (bandwidth - channelWidth) / 2
elif (type(bandwidth) is list):
lowFreq = centreFreq - bandwidth[0]
highFreq = centreFreq + bandwidth[1]
cFreq = lowFreq
chanNum = 1
while (cFreq <= highFreq):
c.append(cFreq)
v.append(0.0)
n.append(0)
f.append(False)
p.append(0.0)
x.append(chanNum)
cFreq += channelWidth
chanNum += 1
nx = np.array(x)
for i in xrange(0, len(sideBands['LSB'])):
if ((centreFreq >= sideBands['LSB'][i][0]) and
(centreFreq <= sideBands['LSB'][i][1])):
# This band is LSB, so we flip the channel numbers.
nx = nx[::-1]
break
return { 'centreFrequency': np.array(c), 'value': np.array(v),
'count': np.array(n), 'flags': f,
'flaggedBandwidth': p,
'channelWidth': channelWidth, 'channelNumber': nx }
def averageTemplate(template):
# Return the average unflagged value of a template.
varr = np.array([])
for i in xrange(0, len(template['value'])):
if (template['flags'][i] == False):
varr = np.append(varr, template['value'][i])
return np.mean(varr)
def getFreq(item):
return item[0]
def frequencyToWavelength(freq):
# Convert a frequency in MHz to a wavelength in metres.
wl = speedoflight / (freq * mhzToHz)
return (wl)
def baselineToLambda(freq, baselineLength):
# Convert a baseline length in metres to units of lambda at the specified frequency.
wl = frequencyToWavelength(freq)
bl = baselineLength / wl
return (bl)
def primaryBeamSize(freq, diam):
# Calculate the size of the primary beam for an antenna with a specified diameter (m),
# at the specified frequency (MHz), in arcmin.
pbfwhm = math.degrees((speedoflight / diam) / (freq * mhzToHz)) * degreesToArcmin
tpbfwhm = "%.1f" % pbfwhm
return (float(tpbfwhm))
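# Example: for the 22 m ATCA dishes at 2100 MHz,
#   primaryBeamSize(2100.0, antennaDiameter)
# gives roughly 22.3 arcmin (lambda/D in radians, converted to arcmin).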
def synthesisedBeamSize(freq, baselineLength, dec, minHa, maxHa, weightFactor):
# Calculate the synthesised beam size given a frequency, baseline length,
# declination and the minimum and maximum hour angles observed.
# Returns an array with the minor-axis FWHM as the first element and the
# major-axis FWHM as the second.
blX = baselineToLambda(freq, baselineLength['dX'])
blY = baselineToLambda(freq, baselineLength['dY'])
blZ = baselineToLambda(freq, baselineLength['dZ'])
# We need the hour angles in radians.
minHaRad = math.radians(minHa * 15.0)
maxHaRad = math.radians(maxHa * 15.0)
# We use the full TMS equation 4.1 here.
# Find the hour angle for maximum u.
umaxHa = math.degrees(math.atan2(blX, blY)) / 15.0
if (umaxHa < minHa):
umaxHa = minHa
if (umaxHa > maxHa):
umaxHa = maxHa
umaxHaRad = math.radians(umaxHa * 15.0)
umax = abs(math.sin(umaxHaRad) * blX + math.cos(umaxHaRad) * blY)
# Find the hour angle for maximum v.
vmaxHa = math.degrees(math.atan2(-1 * blY, blX)) / 15.0
if (vmaxHa < minHa):
vmaxHa = minHa
if (vmaxHa > maxHa):
vmaxHa = maxHa
vmaxHaRad = math.radians(vmaxHa * 15.0)
vmax = abs(-1 * math.sin(math.radians(dec)) * math.cos(vmaxHaRad) * blX +
math.sin(math.radians(dec)) * math.sin(vmaxHaRad) * blY +
math.cos(math.radians(dec)) * blZ)
# The resolution is simply the inverse of umax or vmax, but we also
# convert to arcseconds and multiply by the image weighting factor.
ures = math.degrees(1.0 / umax) * degreesToArcsec * weightFactor
# This next line will trigger a ZeroDivisionError if the source is
# on the celestial equator and we're in an EW array.
vres = math.degrees(1.0 / vmax) * degreesToArcsec * weightFactor
# We now take care of significant figures.
utmp = "%.2f" % ures
vtmp = "%.2f" % vres
ures = float(utmp)
vres = float(vtmp)
# Make the output with the minor axis first.
res = [ ures, vres ]
if (ures > vres):
res = [ vres, ures ]
return (res)
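# Usage sketch (hypothetical values): baselineLength is a dict of baseline
# components in metres, e.g. bl = { 'dX': 3000.0, 'dY': 0.0, 'dZ': 0.0 },
# and the hour angles are in hours, so a 12 h synthesis at dec -30 would be
#   synthesisedBeamSize(5500.0, bl, -30.0, -6.0, 6.0, 1.0)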
def ellipseArea(minor, major):
# Returns the area of an ellipse with specified minor and major
# axis lengths (in arcseconds), in sr.
mn = (minor / 2.0) / degreesToArcsec
mj = (major / 2.0) / degreesToArcsec
a = math.pi * mn * mj # in square degrees
asr = a / (180.0 / math.pi)**2 # in steradians
return (asr)
def brightnessTemperatureSensitivity(rms, synthBeam, freq):
# Calculate the brightness temperature sensitivity (K) of an observation
# with an RMS noise level (mJy/beam), a synthesised beam size (arcsec),
# and a frequency in MHz. This comes from TMS equation 1.2.
A = ellipseArea(synthBeam[0], synthBeam[1])
I = (rms / 1000.0) * jyToSI / A
wl = frequencyToWavelength(freq)
bt = wl * wl * I / (2.0 * boltzmann)
return (bt)
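# Usage sketch (hypothetical values): a 0.05 mJy/beam RMS with a 5" x 10"
# synthesised beam at 5500 MHz,
#   brightnessTemperatureSensitivity(0.05, [5.0, 10.0], 5500.0)
# returns the matching brightness temperature sensitivity in kelvin.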
def readTsys(filename, lf, hf):
# Open the filename.
if (os.path.isfile(filename)):
# Read it.
d = np.loadtxt(filename)
# Sort it.
ds = sorted(d, key=getFreq)
# Split it.
startIndex = 0
endIndex = len(ds) - 1
llf = lf / 1000.0
hhf = hf / 1000.0
while ((ds[startIndex][0] < llf) and
(startIndex < (len(ds) - 1)) and
(ds[startIndex + 1][0] < llf)):
startIndex += 1
while ((ds[endIndex][0] > hhf) and
(endIndex > 0) and
(ds[endIndex - 1][0] > hhf)):
endIndex -= 1
if (startIndex == endIndex):
if (startIndex > 0):
startIndex -= 1
if (endIndex < (len(ds) - 1)):
endIndex += 1
dds = ds[startIndex:(endIndex + 1)]
c = [np.around(row[0] * 1000.0) for row in dds]
v = [(10 ** row[1]) for row in dds]
n = []
f = []
for i in xrange(0, len(v)):
n.append(1)
f.append(False)
return { 'centreFrequency': c, 'value': v, 'count': np.array(n),
'flags': f,
'channelWidth': 1.0 }
else:
raise CalcError("Can't find Tsys file %s." % filename)
def lowHigh(c, w):
return { 'low': c - (w / 2.0), 'high': c + (w / 2.0) }
def overlaps(a, b):
if (b['low'] >= a['high']):
return False
if (b['high'] <= a['low']):
return False
return True
def templateAverage(t):
# Divide the values by the counts.
for i in xrange(0, len(t['centreFrequency'])):
if (t['count'][i] > 0):
t['value'][i] /= float(t['count'][i])
if (isinstance(t['flags'][i], list) == True):
# Multiple input flags, we choose the most common.
ft = 0
ff = 0
for j in xrange(0, len(t['flags'][i])):
if (t['flags'][i][j]):
ft += 1
else:
ff += 1
if (ft > ff):
t['flags'][i] = True
else:
t['flags'][i] = False
def linearInterpolate(p1, p2, pi):
# Using information from p1 and p2, determine the value at pi.
run = p2['frequency'] - p1['frequency']
if (run != 0):
slope = (p2['value'] - p1['value']) / run
else:
slope = 0
nrun = pi['frequency'] - p1['frequency']
return p1['value'] + nrun * slope
def templateInterpolate(t):
# Interpolate values for channels with no counts.
# Get the array for where counts is 0 and not.
zeroes = np.where(t['count'] == 0)
good = np.where(t['count'] > 0)
# Get the arrays without zero counts.
cf = t['centreFrequency'][good]
vs = t['value'][good]
# And the values we need to interpolate for.
rf = t['centreFrequency'][zeroes]
# And then interpolate.
iv = np.interp(rf, cf, vs)
t['value'][zeroes] = iv
def templateFill(srcTemplate, destTemplate):
# Fill in a template spectrum with values from another template, doing it
# in a single pass over each array (no nested loops).
i = 0 # The index of the destination template bin
j = 0 # The index of the source template bin
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
dfs = lowHigh(destTemplate['centreFrequency'][i], destTemplate['channelWidth'])
while (i < len(destTemplate['centreFrequency']) and
j < len(srcTemplate['centreFrequency'])):
if (overlaps(dfs, sfs)):
destTemplate['value'][i] += srcTemplate['value'][j]
destTemplate['count'][i] += 1
if (destTemplate['count'][i] > 1):
if (destTemplate['count'][i] == 2):
t = [ destTemplate['flags'][i], srcTemplate['flags'][j] ]
destTemplate['flags'][i] = t
else:
destTemplate['flags'][i].append(srcTemplate['flags'][j])
else:
destTemplate['flags'][i] = srcTemplate['flags'][j]
j += 1
if (j < len(srcTemplate['centreFrequency'])):
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
elif (srcTemplate['centreFrequency'][j] < destTemplate['centreFrequency'][i]):
j += 1
if (j < len(srcTemplate['centreFrequency'])):
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
else:
i += 1
if (i < len(destTemplate['centreFrequency'])):
dfs = lowHigh(destTemplate['centreFrequency'][i], destTemplate['channelWidth'])
templateAverage(destTemplate)
# Check that the edges aren't empty
# Bottom edge.
if (destTemplate['count'][0] == 0):
# Have to interpolate from the source template.
dfs = lowHigh(destTemplate['centreFrequency'][0], destTemplate['channelWidth'])
j = 0
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
while (sfs['high'] < dfs['low']):
j += 1
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
j -= 1 # Because the breaking point is when the frequency goes too high.
i = 1
while (i < (len(destTemplate['count']) - 1) and destTemplate['count'][i] == 0):
i += 1
destTemplate['value'][0] = linearInterpolate({ 'value': srcTemplate['value'][j],
'frequency': srcTemplate['centreFrequency'][j] },
{ 'value': destTemplate['value'][i],
'frequency': destTemplate['centreFrequency'][i] },
{ 'frequency': destTemplate['centreFrequency'][0] } )
destTemplate['count'][0] = 1
# Top edge.
if (destTemplate['count'][-1] == 0):
dfs = lowHigh(destTemplate['centreFrequency'][-1], destTemplate['channelWidth'])
j = 0
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
while (sfs['high'] <= dfs['low']):
j += 1
if (j < len(srcTemplate['centreFrequency'])):
sfs = lowHigh(srcTemplate['centreFrequency'][j], srcTemplate['channelWidth'])
else:
j -= 1
break
# Break point is fine this time.
i = -2
while (destTemplate['count'][i] == 0 and (abs(i) < (len(destTemplate['centreFrequency'])))):
i -= 1
destTemplate['value'][-1] = linearInterpolate({ 'value': srcTemplate['value'][j],
'frequency': srcTemplate['centreFrequency'][j] },
{ 'value': destTemplate['value'][i],
'frequency': destTemplate['centreFrequency'][i] },
{ 'frequency': destTemplate['centreFrequency'][-1] } )
destTemplate['count'][-1] = 1
templateInterpolate(destTemplate)
def templateEfficiency():
# The template returned by this routine contains all the efficiencies for
# all the bands.
c = [ 900.0, 1200.0, 1500.0, 1800.0, 2100.0, 2300.0, 2500.0, 4400.0, 5900.0,
7400.0, 8800.0, 10600.0, 16000.0, 16500.0, 17000.0, 17500.0, 18000.0, 18500.0,
19000.0, 19500.0, 20000.0, 20500.0, 21000.0, 21500.0, 22000.0, 22500.0, 23000.0,
23500.0, 24000.0, 24500.0, 25000.0, 25400.0, 30000.0, 31000.0, 32000.0, 33000.0,
34000.0, 35000.0, 36000.0, 37000.0, 38000.0, 39000.0, 40000.0, 41000.0, 42000.0,
43000.0, 44000.0, 45000.0, 46000.0, 47000.0, 48000.0, 49000.0, 50000.0, 83781.1,
85556.2, 86834.3, 88680.5, 90526.6, 91946.7, 94005.9, 95852.1, 97272.2, 98976.3,
100254.4, 102200.0, 102300.0, 106432.0 ]
v = [ 0.57, 0.57, 0.60, 0.53, 0.43, 0.42, 0.44, 0.65, 0.72,
0.65, 0.64, 0.65, 0.58, 0.62, 0.63, 0.65, 0.67, 0.70,
0.68, 0.64, 0.64, 0.60, 0.53, 0.55, 0.54, 0.51, 0.51,
0.53, 0.49, 0.49, 0.46, 0.47, 0.60, 0.60, 0.60, 0.60,
0.60, 0.60, 0.60, 0.60, 0.60, 0.60, 0.60, 0.59, 0.58,
0.57, 0.56, 0.55, 0.54, 0.53, 0.52, 0.51, 0.50, 0.3297,
0.3065, 0.3020, 0.2856, 0.2689, 0.2670, 0.2734, 0.2727, 0.2521, 0.2403,
0.2336, 0.2322, 0.14, 0.14 ]
f = []
n = []
for i in xrange(0, len(c)):
n.append(1)
f.append(False)
return { 'centreFrequency': np.array(c), 'value': np.array(v),
'count': np.array(n), 'flags': f,
'channelWidth': 1.0 }
def fillAtmosphereTemplate(templateOpacity, templateTemperature, t, p, h):
# Calculate the opacity and atmospheric temperature at the zenith for each frequency
# in the template.
atmos = refract.calcOpacity(templateOpacity['centreFrequency'] * 1e6, math.radians(90.0), t, p, h)
templateOpacity['value'] = np.array(atmos['tau'])
templateTemperature['value'] = np.array(atmos['Tb'])
def plotTemplate(t, e, outname):
plt.clf()
plt.plot(t['centreFrequency'], t['value'])
plt.plot(e['centreFrequency'], (e['value']), "green")
plt.savefig(outname)
def plotSpectrum(template, conditions, outname):
# Plot the template spectrum we are passed with frequency on the x-axis.
# Initialise the plot.
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(111)
# These are the colours we can use for the different lines.
colours = [ "blue", "green", "red", "black", "yellow" ]
# Go through the conditions (usually weather conditions) and plot a line for each.
for i, c in enumerate(conditions):
ax.plot(template[c]['centreFrequency'], template[c]['value'], colours[i], label=c)
# Set the x-axis limits to be tight on the actual frequency range.
plt.xlim(template[c]['centreFrequency'][0], template[c]['centreFrequency'][-1])
plt.xlabel("Frequency [MHz]")
plt.ylabel("RMS noise level [mJy/beam]")
# Ensure that the x- and y-axes don't have an offset value.
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
# Put the legend with the condition names at the top of the plot outside the border.
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=len(c), mode="expand", borderaxespad=0.)
# Highlight the regions that are flagged in the template.
for i in xrange(0, len(template[conditions[0]]['flags'])):
if (template[conditions[0]]['flags'][i] == True):
x1 = template[conditions[0]]['centreFrequency'][i] - template[c]['channelWidth'] / 2.0
x2 = template[conditions[0]]['centreFrequency'][i] + template[c]['channelWidth'] / 2.0
plt.axvspan(x1, x2, alpha=0.2, edgecolor='none', facecolor='red')
plt.savefig(outname)
def flagTemplate(t, flagType, corrMode, edgeChan):
# Set the flags in the template.
if ((flagType == "continuum") or (flagType == "birdies")):
# This is channel based flagging.
flagSrc = channelFlagging[flagType]
if (corrMode in flagSrc):
flagSrc = flagSrc[corrMode]
for c in xrange(0, len(flagSrc)):
for i in xrange(0, len(t['flags'])):
if (t['channelNumber'][i] == flagSrc[c]):
t['flags'][i] = True
break
elif (flagType == "rfi"):
# This is frequency based flagging.
oldi = 0
flagSrc = frequencyFlagging[flagType]
for r in xrange(0, len(flagSrc)):
for i in xrange(oldi, len(t['flags'])):
f1 = t['centreFrequency'][i] - t['channelWidth'] / 2.0
f2 = t['centreFrequency'][i] + t['channelWidth'] / 2.0
if (f1 > flagSrc[r][1]):
break
t['flaggedBandwidth'][i] += frequencyOverlap(f1, f2, flagSrc[r][0], flagSrc[r][1])
oldi = i
# Check for which channels are above the percentage flagged cut.
for i in xrange(0, len(t['flags'])):
p = t['flaggedBandwidth'][i] / t['channelWidth']
if (p > 0.5):
t['flags'][i] = True
elif (flagType == "edge"):
# Flag edge channels.
for i in xrange(0, len(t['channelNumber'])):
cn = i + 1
rcn = len(t['channelNumber']) - i
if ((cn <= edgeChan) or
(rcn <= edgeChan)):
t['flags'][i] = True
def calculateSensitivity(rmsTemplate, nAnts):
# Given a template filled with the RMS noise in each channel in the continuum
# band, return the average sensitivity of the continuum band channels (the
# spectral RMS), and the spectral RMS divided by the square root of the number
# of unflagged channels (the continuum RMS). This routine respects any
# flagging present in the template.
rmsTotal = 0.0
rmsZoomTotal = 0.0
rmsN = 0
rmsZoomN = 0
for i in xrange(0, len(rmsTemplate['value'])):
# We count flagged continuum channels in the zoom RMS calculation.
rmsZoomTotal += rmsTemplate['value'][i]
rmsZoomN += 1
if (rmsTemplate['flags'][i] == False):
rmsTotal += rmsTemplate['value'][i]
rmsN += 1
if (rmsN > 0):
rmsSpectral = rmsTotal / rmsN
totalBandwidth = rmsN * rmsTemplate['channelWidth']
rmsContinuum = rmsSpectral / math.sqrt(float(rmsN))
rmsZoom = rmsSpectral * math.sqrt(float(nZoomChannels))
else:
rmsSpectral = None
totalBandwidth = None
rmsContinuum = None
rmsZoom = None
# Calculate the SEFDs for each antenna and the array as a whole.
# We use TMS equation 1.6 for this and convert to Jy.
Aone = surfaceArea(antennaDiameter)
Aall = nAnts * Aone
sefdOne = 2.0 * boltzmann * rmsTemplate['systemp'] / (Aone * 1e-26)
sefdAll = 2.0 * boltzmann * rmsTemplate['systemp'] / (Aall * 1e-26)
return { 'rms': { 'spectral': rmsSpectral, 'continuum': rmsContinuum, 'zoom': rmsZoom },
'bandwidth': { 'unflagged': totalBandwidth }, 'sefd': { 'antenna': sefdOne,
'array': sefdAll } }
def calculateRms(tsys, efficiency, opacity, temperature, minHa, maxHa, perHa, nAntenna,
totalTime, weighting, sind, cosd):
# Given the tsys and efficiency templates, the number of antennas involved in the
# imaging, the total integration time and the image weighting scheme, this routine
# will return another template with each channel being the RMS noise expected in
# that channel. This comes from eqn 6.62 of TMS, where eta_Q is 1 (for CABB's
# digitisation) but A is multiplied by our efficiency factor. That equation is
# for only a single polarisation though, so for an unpolarised source, the noise
# level is sqrt(2) lower, which is where the sqrt(2) factor in the numerator comes from
# instead of the 2.
# The variables for our output template.
c = []
v = []
n = []
f = []
# Figure out how much time is spent in each integration period.
haRange = maxHa - minHa
nIntegrations = math.ceil(haRange * perHa)
intTime = totalTime / nIntegrations
Texcess = []
systemTemperature = []
for j in xrange(0, int(nIntegrations + 1)):
# The hour angle at this integration.
jHa = minHa + float(j) / perHa
# The elevation at this hour angle.
cosha = math.cos(math.radians(jHa * 15.0))
sinel = sinl * sind + cosl * cosd * cosha
# Calculate the excess temperature due to the atmosphere
# and CMB.
elFactor = np.exp(-1.0 * opacity['value'] / sinel)
cbFactor = 2.7 * elFactor
ivFactor = 1.0 - elFactor
atFactor = temperature['value'] * ivFactor
Texcess.append(atFactor + cbFactor)
for i in xrange(0, len(tsys['centreFrequency'])):
# Check that the frequencies are the same in both templates.
if (tsys['centreFrequency'][i] == efficiency['centreFrequency'][i]):
# All's good.
c.append(tsys['centreFrequency'][i])
# Get the average excess temperature now.
exTemp = [ row[i] for row in Texcess ]
excessTemp = np.sum(exTemp) / float(len(exTemp))
Tmeas = tsys['value'][i] + excessTemp
TmeasEff = Tmeas / efficiency['value'][i]
# The units of this are actually mJy, since we keep the channel width
# in MHz rather than converting to Hz (convenient, isn't it!).
v.append((math.sqrt(2.0) * boltzmann * Tmeas * weighting['avg']) /
(1e-26 * surfaceArea(antennaDiameter) *
efficiency['value'][i] *
math.sqrt(float(nAntenna) * float(nAntenna - 1) *
tsys['channelWidth'] * (totalTime * 60.0))))
n.append(1)
if (tsys['flags'][i] or efficiency['flags'][i]):
f.append(True)
else:
f.append(False)
systemTemperature.append(TmeasEff)
return { 'centreFrequency': np.array(c), 'value': np.array(v),
'count': np.array(n), 'flags': f,
'channelWidth': tsys['channelWidth'], 'channelNumber': tsys['channelNumber'],
'systemp': np.mean(systemTemperature),
'systemTemperature': float("%.1f" % np.mean(systemTemperature))}
def surfaceArea(d):
# Given the diameter of a dish (m), return its surface area (m^2).
return (math.pi * ((d / 2.0) ** 2))
def weightingFactor(weighting, array, ant6):
# Return the w_rms / w_mean weighting factors given the weighting scheme,
# the array configuration and whether antenna 6 is included in the imaging.
# The weighting factors we determined from simulations.
factors = [
{ 'array': [ '6000', '6km', '3000', '3km' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.039, 'min': 1.000, 'max': 1.079, 'beam': 1.32 },
'R1': { 'avg': 1.040, 'min': 1.000, 'max': 1.080, 'beam': 1.32 },
'R0': { 'avg': 1.871, 'min': 1.350, 'max': 2.781, 'beam': 0.84 },
'R-1': { 'avg': 5.791, 'min': 3.685, 'max': 10.987, 'beam': 0.80 },
'R-2': { 'avg': 5.847, 'min': 3.688, 'max': 11.240, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.97 },
'R1': { 'avg': 1.002, 'min': 1.001, 'max': 1.004, 'beam': 0.89 },
'R0': { 'avg': 1.882, 'min': 1.703, 'max': 1.943, 'beam': 0.66 },
'R-1': { 'avg': 3.875, 'min': 2.543, 'max': 7.102, 'beam': 0.64 },
'R-2': { 'avg': 3.908, 'min': 2.562, 'max': 7.222, 'beam': 0.64 } } ] },
{ 'array': [ '1500', '1.5km' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.001, 'beam': 1.32 },
'R0': { 'avg': 1.507, 'min': 1.181, 'max': 1.846, 'beam': 0.84 },
'R-1': { 'avg': 7.925, 'min': 5.200, 'max': 16.732, 'beam': 0.80 },
'R-2': { 'avg': 8.151, 'min': 5.163, 'max': 19.304, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.96 },
'R1': { 'avg': 1.001, 'min': 1.000, 'max': 1.003, 'beam': 0.88 },
'R0': { 'avg': 1.854, 'min': 1.576, 'max': 1.953, 'beam': 0.64 },
'R-1': { 'avg': 3.900, 'min': 2.524, 'max': 8.218, 'beam': 0.62 },
'R-2': { 'avg': 3.923, 'min': 2.506, 'max': 8.707, 'beam': 0.62 } } ] },
{ 'array': [ '750', '750m' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R0': { 'avg': 1.299, 'min': 1.143, 'max': 1.621, 'beam': 0.84 },
'R-1': { 'avg': 12.893, 'min': 8.581, 'max': 17.674, 'beam': 0.80 },
'R-2': { 'avg': 14.027, 'min': 8.882, 'max': 22.273, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.96 },
'R1': { 'avg': 1.001, 'min': 1.000, 'max': 1.002, 'beam': 0.88 },
'R0': { 'avg': 1.925, 'min': 1.850, 'max': 1.971, 'beam': 0.62 },
'R-1': { 'avg': 3.557, 'min': 2.578, 'max': 5.255, 'beam': 0.59 },
'R-2': { 'avg': 3.582, 'min': 2.583, 'max': 5.369, 'beam': 0.59 } } ] },
{ 'array': [ '367', 'EW352', 'EW367', 'EW352/367' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32 },
'R0': { 'avg': 1.077, 'min': 1.029, 'max': 1.157, 'beam': 0.84 },
'R-1': { 'avg': 18.304, 'min': 16.432, 'max': 17.498, 'beam': 0.80 },
'R-2': { 'avg': 31.295, 'min': 20.574, 'max': 52.204, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.09 },
'R1': { 'avg': 1.001, 'min': 1.000, 'max': 1.001, 'beam': 1.00 },
'R0': { 'avg': 1.917, 'min': 1.838, 'max': 1.965, 'beam': 0.68 },
'R-1': { 'avg': 3.271, 'min': 2.537, 'max': 4.639, 'beam': 0.64 },
'R-2': { 'avg': 3.298, 'min': 2.550, 'max': 4.718, 'beam': 0.64 } } ] },
{ 'array': [ 'h214', 'H214', 'h168', 'H168', 'h75', 'H75' ],
'factors': [
{ 'ca06': True,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32},
'R1': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 1.32},
'R0': { 'avg': 1.106, 'min': 1.023, 'max': 1.186, 'beam': 0.84},
'R-1': { 'avg': 16.865, 'min': 15.629, 'max': 18.294, 'beam': 0.80 },
'R-2': { 'avg': 26.926, 'min': 18.717, 'max': 58.094, 'beam': 0.80 } },
{ 'ca06': False,
'R2': { 'avg': 1.000, 'min': 1.000, 'max': 1.000, 'beam': 0.75 },
'R1': { 'avg': 1.001, 'min': 1.001, 'max': 1.002, 'beam': 0.74 },
'R0': { 'avg': 1.641, 'min': 1.529, 'max': 1.760, 'beam': 0.61 },
'R-1': { 'avg': 1.984, 'min': 1.753, 'max': 2.281, 'beam': 0.59 },
'R-2': { 'avg': 1.988, 'min': 1.755, 'max': 2.288, 'beam': 0.59 } } ] } ]
# Search through the array.
for f in xrange(0, len(factors)):
arrayFound = False
for i in xrange(0, len(factors[f]['array'])):
if (array == factors[f]['array'][i]):
arrayFound = True
break
if (arrayFound):
g = 1
if (ant6):
g = 0
if (weighting in factors[f]['factors'][g]):
return (factors[f]['factors'][g][weighting])
# We couldn't find the appropriate weighting factors.
raise CalcError("Weighting factors are not available.")
def maximumBaseline(array):
# Return the maximum baseline length for a named array, both on the track
# only, and including CA06.
stationLocations = {
'W0': [ -4752438.459, 2790321.299, -3200483.747 ],
'W2': [ -4752422.922, 2790347.675, -3200483.747 ],
'W4': [ -4752407.385, 2790374.052, -3200483.747 ],
'W6': [ -4752391.848, 2790400.428, -3200483.747 ],
'W8': [ -4752376.311, 2790426.804, -3200483.747 ],
'W10': [ -4752360.774, 2790453.181, -3200483.747 ],
'W12': [ -4752345.237, 2790479.557, -3200483.747 ],
'W14': [ -4752329.700, 2790505.934, -3200483.747 ],
'W16': [ -4752314.163, 2790532.310, -3200483.747 ],
'W32': [ -4752189.868, 2790743.321, -3200483.747 ],
'W45': [ -4752088.877, 2790914.767, -3200483.747 ],
'W64': [ -4751941.276, 2791165.342, -3200483.747 ],
'W84': [ -4751785.907, 2791429.106, -3200483.747 ],
'W98': [ -4751677.148, 2791613.741, -3200483.747 ],
'W100': [ -4751661.611, 2791640.117, -3200483.747 ],
'W102': [ -4751646.074, 2791666.493, -3200483.747 ],
'W104': [ -4751630.537, 2791692.870, -3200483.747 ],
'W106': [ -4751615.000, 2791719.246, -3200483.747 ],
'W109': [ -4751591.695, 2791758.810, -3200483.747 ],
'W110': [ -4751583.926, 2791771.999, -3200483.747 ],
'W111': [ -4751576.158, 2791785.187, -3200483.747 ],
'W112': [ -4751568.389, 2791798.375, -3200483.747 ],
'W113': [ -4751560.621, 2791811.563, -3200483.747 ],
'W124': [ -4751475.168, 2791956.633, -3200483.747 ],
'W125': [ -4751467.399, 2791969.821, -3200483.747 ],
'W128': [ -4751444.094, 2792009.386, -3200483.747 ],
'W129': [ -4751436.325, 2792022.574, -3200483.747 ],
'W140': [ -4751350.872, 2792167.644, -3200483.747 ],
'W147': [ -4751296.492, 2792259.961, -3200483.747 ],
'W148': [ -4751288.724, 2792273.149, -3200483.747 ],
'W163': [ -4751172.197, 2792470.972, -3200483.747 ],
'W168': [ -4751133.354, 2792536.913, -3200483.747 ],
'W172': [ -4751102.281, 2792589.666, -3200483.747 ],
'W173': [ -4751094.512, 2792602.854, -3200483.747 ],
'W182': [ -4751024.596, 2792721.547, -3200483.747 ],
'W189': [ -4750970.216, 2792813.865, -3200483.747 ],
'W190': [ -4750962.448, 2792827.053, -3200483.747 ],
'W195': [ -4750923.605, 2792892.994, -3200483.747 ],
'W196': [ -4750915.837, 2792906.182, -3200483.747 ],
'W392': [ -4749393.198, 2795491.050, -3200483.694 ],
'N2': [ -4751628.291, 2791727.075, -3200457.305 ],
'N5': [ -4751648.226, 2791738.818, -3200417.642 ],
'N7': [ -4751661.517, 2791746.647, -3200391.200 ],
'N11': [ -4751688.098, 2791762.304, -3200338.316 ],
'N14': [ -4751708.034, 2791774.047, -3200298.653 ] }
endStations = [
{ 'array': [ '6000', '6km', '3000', '3km' ],
'stations': [ 'W2', 'W196' ] },
{ 'array': [ '1500', '1.5km' ],
'stations': [ 'W98', 'W195' ] },
{ 'array': [ '750', '750m' ],
'stations': [ 'W98', 'W148' ] },
{ 'array': [ '367', 'EW367', 'EW352/367' ],
'stations': [ 'W104', 'W128' ] },
{ 'array': [ 'EW352' ],
'stations': [ 'W102', 'W125' ] },
{ 'array': [ 'h214', 'H214' ],
'stations': [ 'W98', 'W113', 'W104', 'N14' ] },
{ 'array': [ 'h168', 'H168' ],
'stations': [ 'W100', 'W111', 'W104', 'N11' ] },
{ 'array': [ 'h75', 'H75' ],
'stations': [ 'W104', 'W109', 'W104', 'N5' ] } ]
# Search through the array.
for s in xrange(0, len(endStations)):
if (array in endStations[s]['array']):
# Calculate the maximum dX, dY, dZ.
mdX = 0
mdY = 0
mdZ = 0
for i in xrange(0, len(endStations[s]['stations']) - 1):
for j in xrange(i + 1, len(endStations[s]['stations'])):
dYp = abs(stationLocations[endStations[s]['stations'][i]][0] -
stationLocations[endStations[s]['stations'][j]][0])
dXp = abs(stationLocations[endStations[s]['stations'][i]][1] -
stationLocations[endStations[s]['stations'][j]][1])
dX = dXp * math.cos(cangle) - dYp * math.sin(cangle)
dY = dXp * math.sin(cangle) + dYp * math.cos(cangle)
if (dX > mdX):
mdX = dX
if (dY > mdY):
mdY = dY
dZ = abs(stationLocations[endStations[s]['stations'][i]][2] -
stationLocations[endStations[s]['stations'][j]][2])
if (dZ > mdZ):
mdZ = dZ
mdX6 = 0
mdY6 = 0
mdZ6 = 0
for i in xrange(0, len(endStations[s]['stations'])):
dYp = abs(stationLocations[endStations[s]['stations'][i]][0] -
stationLocations['W392'][0])
dXp = abs(stationLocations[endStations[s]['stations'][i]][1] -
stationLocations['W392'][1])
dX = dXp * math.cos(cangle) - dYp * math.sin(cangle)
dY = dXp * math.sin(cangle) + dYp * math.cos(cangle)
if (dX > mdX6):
mdX6 = dX
if (dY > mdY6):
mdY6 = dY
dZ = abs(stationLocations[endStations[s]['stations'][i]][2] -
stationLocations['W392'][2])
if (dZ > mdZ6):
mdZ6 = dZ
return ( { 'track': { 'dX': mdX, 'dY': mdY, 'dZ': mdZ },
'ca06': { 'dX': mdX6, 'dY': mdY6, 'dZ': mdZ6 } } )
# We couldn't find the appropriate maximum baseline lengths.
raise CalcError("Baseline lengths are not available.")
def addToOutput(output, item, name, value, description, units):
# Add some information to the JSON output object.
if (isinstance(name, list) == False):
output[item][name] = value
output['description'][name] = description
if (units is not None):
output['units'][name] = units
else:
if (name[0] not in output[item]):
output[item][name[0]] = {}
output['description'][name[0]] = description
if (units is not None):
output['units'][name[0]] = units
output[item][name[0]][name[1]] = value
def bandwidthToVelocity(lfreq, bw, restfreq):
# Given a rest frequency in MHz, calculate the velocity span represented
# by the specified bandwidth (MHz) starting at some low frequency (MHz).
vr = bw * restfreq * speedoflight * mToKm / (lfreq * (lfreq + bw))
vro = float("%.3f" % vr)
return (vro)
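# Example: a 1 MHz bandwidth starting at 1400 MHz, referenced to the
# 1420.405752 MHz HI rest frequency,
#   bandwidthToVelocity(1400.0, 1.0, 1420.405752)
# spans roughly 217 km/s.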
| gpl-3.0 |
troycomi/microMS | GUICanvases/microMSModel.py | 1 | 34074 |
from PIL import ImageDraw, ImageFont
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
import os
import random
from scipy.spatial.distance import pdist
import numpy as np
from copy import deepcopy, copy
from GUICanvases import GUIConstants
from ImageUtilities import slideWrapper
from ImageUtilities import blobFinder
from ImageUtilities import blob
from ImageUtilities import TSPutil
from ImageUtilities.enumModule import Direction, StepSize
from ImageUtilities import blobList
from CoordinateMappers import supportedCoordSystems
class MicroMSModel(object):
'''
The model of a microMS experiment consisting of a slide, blob finder, and blobs
Performs several vital functions for interacting with each object and maintains a list of blobs
'''
def __init__(self, GUI):
'''
Initialize a new model setup. Slide starts as None.
The coordinateMapper is set as the first mapper of the supported mappers.
Also calls self.resetVariables to clear other instance variables.
GUI: the supporting GUI
'''
self.slide = None
self.coordinateMapper = supportedCoordSystems.supportedMappers[0]
self.GUI = GUI
self.resetVariables()
def setupMicroMS(self, filename):
'''
Loads an image and sets up a new session
filename: the image to load
'''
self.slide = slideWrapper.SlideWrapper(filename)
self.resetVariables()
def resetVariables(self):
'''
Clears and initializes all instance variables
'''
self.blobCollection = [blobList.blobList(self.slide) for i in range(10)]
self.setCurrentBlobs(0)
self.tempBlobs = None
self.histogramBlobs = None
self.histColors = None
self.coordinateMapper.clearPoints()
self.mirrorImage = False
self.showPatches = True
self.drawAllBlobs = False
self.showPrediction = False
self.showThreshold = False
def setCoordinateMapper(self, newMapper):
'''
Sets a new coordinate mapper and clears its points
newMapper: the new instance of coordinateMapper to use
'''
self.coordinateMapper = newMapper
self.coordinateMapper.clearPoints()
def saveEntirePlot(self, fileName, ROI = None):
'''
saves the entire slide image at the current zoom level
fileName: the file to write to
ROI: optional list of (x,y) vertices; when given, only the region bounding them is saved
*NOTE: this can take a while to run and generate large files at max zoom
'''
#save the current size and position
size, pos = self.slide.size, self.slide.pos
if ROI is None:
#match size to whole slide, position at center
self.slide.size, self.slide.pos = \
(self.slide.dimensions[0]//2**self.slide.lvl,
self.slide.dimensions[1]//2**self.slide.lvl), \
(self.slide.dimensions[0]//2, self.slide.dimensions[1]//2)
else:
#limit to just the ROI bounds
minx = maxx = ROI[0][0]
miny = maxy = ROI[0][1]
for r in ROI:
minx = minx if minx < r[0] else r[0]
maxx = maxx if maxx > r[0] else r[0]
miny = miny if miny < r[1] else r[1]
maxy = maxy if maxy > r[1] else r[1]
self.slide.size = ((maxx-minx) // 2 **self.slide.lvl, (maxy-miny) //2**self.slide.lvl)
self.slide.pos = ((maxx-minx) // 2 + minx, (maxy-miny) //2 + miny)
self.slide.size = [int(i) for i in self.slide.size]
self.slide.pos = [int(i) for i in self.slide.pos]
#get whole image
wholeImg = self.slide.getImg()
draw = ImageDraw.Draw(wholeImg)
#markup image
linWid = 1 if 6-self.slide.lvl < 1 else 6-self.slide.lvl
tfont = ImageFont.truetype("arial.ttf",linWid+6)
#for each blob list
for ii in range(len(self.blobCollection)):
if self.blobCollection[ii].length() > 0:
drawnlbls = set()
drawlbl = self.blobCollection[ii].blobs[0].group is not None
#for each blob
for i,gb in enumerate(self.blobCollection[ii].blobs):
p = self.slide.getLocalPoint((gb.X,gb.Y))
rad = gb.radius/2**self.slide.lvl
#draw blob outline
draw.ellipse((p[0]-rad, p[1]-rad, p[0]+rad, p[1]+rad), outline=GUIConstants.MULTI_BLOB[ii])
#draw label if group exists
if drawlbl and gb.group not in drawnlbls:
draw.text((p[0]+10/2**self.slide.lvl,p[1]-10/2**self.slide.lvl),
str(gb.group),
font=tfont, fill=GUIConstants.EXPANDED_TEXT)
drawnlbls.add(gb.group)
#roi
if ROI is not None:
ROI = [self.slide.getLocalPoint(r) for r in ROI]
ROI.append(ROI[0])
draw.line([(x[0], x[1]) for x in ROI], fill = GUIConstants.ROI)
#save image
wholeImg.save(fileName)
#restore size and position
self.slide.size, self.slide.pos = size, pos
def saveCurrentBlobFinding(self, filename):
'''
Save the current blob finder and currently selected blob list
filename: file to save to
'''
#slide not set up
if self.slide is None:
return "No slide loaded"
#current list is empty
if self.blobCollection[self.currentBlobs].length() == 0:
return "List {} contains no blobs!".format(self.currentBlobs +1) #plus one for GUI display
#save blobs
self.blobCollection[self.currentBlobs].saveBlobs(filename)
return "Saved blob information of list {}".format(self.currentBlobs+1)
def saveHistogramBlobs(self, filename):
'''
Save up to 3 files for different histogram filters
filename: the filename to save
'''
#slide not set up
if self.slide is None:
return "No slide loaded"
#no histogram blobs to save
if self.histogramBlobs is None or len(self.histogramBlobs) == 0:
return "No histogram divisions provided"
#save different divisions
f, ex = os.path.splitext(filename)
for blbs in self.histogramBlobs:
if blbs.length() > 0:
blbs.saveBlobs('{}_{}_{}{}'.format(f, blbs.description, blbs.threshCutoff, ex))
return "Saved histogram divisions with base name {}".format(os.path.split(f)[1])
def saveAllBlobs(self, filename):
'''
Save each list of blobs in it's own list
filename: a full filename with extension. The list number will be added as such:
dir/test.txt -> dir/test_1.txt
'''
#slide not set up
if self.slide is None:
return "No slide loaded"
f, ex = os.path.splitext(filename)
#save each blob list
for i, blbs in enumerate(self.blobCollection):
if blbs.length() > 0:
blbs.saveBlobs('{}_{}{}'.format(f, i, ex))
return "Saved blobs with base name '{}'".format(os.path.split(f)[1])
def saveCoordinateMapper(self, filename):
'''
Save the current coordinate mapper
filename: file to save to
'''
#no fiducials trained
if len(self.coordinateMapper.pixelPoints) < 1:
return "No coordinates to save"
self.coordinateMapper.saveRegistration(filename)
return "Saved coordinate mapper"
def saveInstrumentPositions(self, filename, tspOpt, maxPoints = None):
'''
save positions of blobs in instrument coordinate system
fileName: file to save to
tspOpt: bool indicating whether or not to perform traveling salesman optimization
maxPoints: maximum number of blobs to save. Default (None) saves all
'''
#check if the file can be saved
if len(self.coordinateMapper.physPoints) < 2:
return "Not enough training points to save instrument file"
if self.blobCollection[self.currentBlobs].length() == 0:
return "No blobs to save"
#get current blob list
blobs = self.blobCollection[self.currentBlobs].blobs
#if maxPoints is valid
if maxPoints is not None and maxPoints > 0 and maxPoints < self.currentBlobLength():
#obtain a random sample of blobs
blobs = random.sample(blobs,maxPoints)
#if tspOpt is requested
if tspOpt == True:
#reorder visit order
soln = TSPutil.TSPRoute(blob.blob.getXYList(blobs))
blobs = [blobs[i] for i in soln]
#save list of blobs
self.coordinateMapper.saveInstrumentFile(filename,
blobs)
return "Saved instrument file of list {}".format(self.currentBlobs +1 )
def saveInstrumentRegistrationPositions(self, filename):
'''
Save fiducial locations in the instrument coordinate system
'''
if len(self.coordinateMapper.physPoints) < 2:
return "Not enough training points to save fiducial locations"
self.coordinateMapper.saveInstrumentRegFile(filename)
return "Saved instrument registration positions"
def loadCoordinateMapper(self,filename):
'''
load a prior registration file
changes the current mapper to the one specified in the file
filename: file to load
returns a status string to display, and the index of the new mapper
'''
#get old index
old = supportedCoordSystems.supportedMappers.index(self.coordinateMapper)
#get first line in file
reader = open(filename,'r')
line = reader.readline().strip()
reader.close()
#see if that is a name of a coordinatemapper
try:
i = supportedCoordSystems.supportedNames.index(line)
except ValueError:
return 'Unsupported instrument: {}'.format(line), old
#See if mapper has changed to warn the user
result = 'Loaded {} registration'.format(line)
if i != old:
result = 'Warning, changing instrument to {}'.format(line)
self.coordinateMapper = supportedCoordSystems.supportedMappers[i]
self.coordinateMapper.loadRegistration(filename)
return result, i
def loadBlobFinding(self, filename):
'''
Loads the blobs from the provided file into the current list of blobs and restores the saved blob finder parameters
filename: file to load
'''
self.blobCollection[self.currentBlobs].loadBlobs(filename)
return "Finished loading blob positions into list {}".format(self.currentBlobs+1)
def loadInstrumentPositions(self, filename):
'''
Load an instrument position file into the current blob list.
Will not have proper radius, but should retain the groups.
filename: file to load
'''
self.blobCollection[self.currentBlobs].blobs = \
self.coordinateMapper.loadInstrumentFile(filename)
self.blobCollection[self.currentBlobs].generateGroupLabels()
return "Finished loading instrument file into list {}".format(self.currentBlobs+1)
def currentBlobLength(self):
'''
Gets the length of the current blob list
'''
return self.blobCollection[self.currentBlobs].length()
def currentInstrumentExtension(self):
'''
Gets the instrument extension of the current coordinate mapper
'''
return self.coordinateMapper.instrumentExtension
def runGlobalBlobFind(self):
'''
Performs global blob finding on the current slide and sets to current blob list
'''
if self.slide is None:
return "No slide was open"
return self.blobCollection[self.currentBlobs].blobSlide() + " in list {}".format(self.currentBlobs+1)
def updateCurrentBlobs(self, newBlobs):
'''
Stores newBlobs in the first empty blob list and sets it as the current list
newBlobs: the blobList to store; if every list is already occupied the new blobs are discarded
'''
if not isinstance(newBlobs, blobList.blobList):
    raise ValueError('New blobs must be a blobList')
#find first unused blob index
for i in range(len(self.blobCollection)):
if self.blobCollection[i].length() == 0:
#add new blobs
self.blobCollection[i] = newBlobs
self.setCurrentBlobs(i)
return
def distanceFilter(self, distance):
'''
filters the global blob list to remove blobs which are closer than 'distance' pixels
the prior list is stored in previous current index
distance: distance threshold
'''
if self.currentBlobLength() == 0:
return "No blobs to filter"
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs].distanceFilter(distance, verbose = True))
return "Finished distance filter in list {}".format(self.currentBlobs+1)
def roiFilter(self):
'''
Keeps only the blobs of the current list that fall inside the drawn ROI
'''
if self.currentBlobLength() == 0:
return "No blobs to filter"
if len(self.blobCollection[self.currentBlobs].ROI) < 3:
return "No ROI selected"
startLen = self.currentBlobLength()
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs].roiFilter())
endLen = self.currentBlobLength()
return "{} blobs removed, {} remain in list {}".format(startLen - endLen, endLen, self.currentBlobs+1)
def roiFilterInverse(self):
'''
Removes the blobs of the current list that fall inside the drawn ROI
'''
if self.currentBlobLength() == 0:
return "No blobs to filter"
if len(self.blobCollection[self.currentBlobs].ROI) < 3:
return "No ROI selected"
startLen = self.currentBlobLength()
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs].roiFilterInverse())
endLen = self.currentBlobLength()
return "{} blobs removed, {} remain in list {}".format(startLen - endLen, endLen, self.currentBlobs+1)
def hexPackBlobs(self, separation, layers, dynamicLayering = False):
'''
expands each blob into hexagonally closest packed positions
sep: minimum separation between points
layers: number of layers to generate
dynamicLayering: adjust the number of layers with the blob radius
'''
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs]\
.hexagonallyClosePackPoints(separation, layers, dynamicLayering = dynamicLayering))
def rectPackBlobs(self, separation, layers, dynamicLayering = False):
'''
expands each blob into rectangularly packed positions
sep: minimum separation between points
layers: number of layers to generate
dynamicLayering: adjust the number of layers with the blob radius
'''
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs]\
.rectangularlyPackPoints(separation, layers, dynamicLayering = dynamicLayering))
def circularPackBlobs(self, separation, maxShots, offset):
'''
expands each blob into circularly packed positions around the blob
sep: minimum separation between spots
shots: maximum number of spots to place around each blob
offset: offset from the current circumference,
offset > 0 places spots outside the current blob
'''
self.updateCurrentBlobs(self.blobCollection[self.currentBlobs]\
.circularPackPoints(separation, maxShots, offset, minSpots = 4))
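#Usage sketch (hypothetical parameters; 'model' is a MicroMSModel instance):
#expand every blob in the current list into up to 10 positions on a ring
#5 pixels outside its circumference, at least 20 pixels apart:
#   model.circularPackBlobs(separation=20, maxShots=10, offset=5)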
def analyzeAll(self):
'''
if the current mapper is connected to an instrument, triggers analysis of all blobs currently found
'''
#get all pixel points and translate to motor coords
if self.currentBlobLength() == 0:
return "No targets currently selected"
if len(self.coordinateMapper.physPoints) <= 2:
return "Not enough training points"
if self.coordinateMapper.connectedInstrument is None or \
self.coordinateMapper.connectedInstrument.connected == False:
return "No connected instrument"
targets = list(map(lambda b: self.coordinateMapper.translate((b.X, b.Y)),
self.blobCollection[self.currentBlobs].blobs))
#send to connected instrument
self.coordinateMapper.connectedInstrument.collectAll(targets)
return "Finished collection"
def setBlobSubset(self, blobSubset):
'''
Sets the histogram blobs supplied by a histcanvas
blobSubset: a tuple of two lists; the first holds blobLists, the second their display colors
'''
self.histogramBlobs = blobSubset[0]
self.histColors = blobSubset[1]
def reportSlideStep(self, direction, stepSize):
'''
Moves the slide in the specified direction, taking mirroring into account
direction: an enumModule.Direction giving the observed direction of motion
stepSize: an enumModule.StepSize dictating the size of the step
'''
if self.slide is not None:
if self.mirrorImage:
if direction == Direction.left:
self.slide.step(Direction.right, stepSize)
elif direction == Direction.right:
self.slide.step(Direction.left, stepSize)
else:
self.slide.step(direction, stepSize)
else:
    self.slide.step(direction, stepSize)
def testBlobFind(self):
'''
Performs a test blob find on the current position
Sets the zoom level to the maximum value to match test blob finding
'''
self.slide.lvl = 0
if self.slide is not None:
self.tempBlobs = self.blobCollection[self.currentBlobs].blobFinder.blobImg()
def setCurrentBlobs(self, ind):
'''
Sets the current blob index to the specified value
ind: integer value of list to show
'''
self.currentBlobs = ind
if self.GUI is not None:
self.GUI.setTitle(self.currentBlobs)
def reportSize(self, newSize):
'''
Sets the size of the slidewrapper to the specified value.
Sets the max number of pixels to 600 but keeps the aspect ratio
newSize = (width, height)
'''
w,h = newSize
factor = 600/max(w,h)
w, h = int(w*factor), int(h*factor)
self.slide.size = [w, h]
def getCurrentImage(self):
'''
gets the image to display, accounting for showing thresholds
'''
#show the threshold image produced by blobfinder helper method
if self.showThreshold:
im, num = blobFinder.blobFinder._blbThresh(self.slide.getImg(),
self.blobCollection[self.currentBlobs].blobFinder.colorChannel,
self.blobCollection[self.currentBlobs].blobFinder.threshold)
return im
#else, use current image view
else:
return self.slide.getImg()
def getPatches(self, limitDraw):
'''
Gets the patches of all blobs, registration marks and predicted points.
limitDraw: boolean toggle to limit the number of blobs to draw
'''
ptches = []
#nothing requested or nothing to show
if self.showPatches == False or self.slide is None:
return PatchCollection(ptches)
#temp blobs from blob finding test. Only drawn once and the only displayed thing
if self.tempBlobs is not None:
ptches = [plt.Circle((blb.X, blb.Y),
blb.radius,
color = GUIConstants.TEMP_BLOB_FIND,
linewidth = 1,
fill = False)
for blb in self.tempBlobs]
#reset temp blobs
self.tempBlobs = None
#return patches, if none to show match_original needs to be false
return PatchCollection(ptches, match_original=(len(ptches) != 0))
#draw predicted points from coordinate mapper
lineWid = 1 if 6-self.slide.lvl < 1 else 6-self.slide.lvl
if self.showPrediction and len(self.coordinateMapper.physPoints) >= 2:
points, inds = self.slide.getPointsInBounds(self.coordinateMapper.predictedPoints())
ptches.extend(
[plt.Circle(p, GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
color = GUIConstants.PREDICTED_POINTS,
linewidth = lineWid,
fill = False)
for p in points]
)
#draw fiducial labels, color blended by deviation
if len(self.coordinateMapper.physPoints) > 2:
deviations = self.coordinateMapper.squareErrors()
#scale between 0 and 1
mind = min(deviations)
maxd = max(deviations)
deviations = [ (x - mind) / (maxd - mind) for x in deviations]
else:
deviations = [ 0, 0 ]
good = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL)
bad = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL_WORST)
points, inds = self.slide.getPointsInBounds(self.coordinateMapper.pixelPoints)
for i,p in enumerate(points):
#blend color based on deviation
d = deviations[inds[i]]
col = tuple(d * x + (1-d) * y for x,y in zip(bad, good))
ptches.append(
plt.Circle(p, GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
color = col,
linewidth = lineWid,
fill=False)
)
#draw region of interest
ptches.extend(self.getROIPatches())
#draw histogram blobs
if self.histogramBlobs is not None and len(self.histogramBlobs) != 0:
for i, blbs in enumerate(self.histogramBlobs):
ptches.extend(blbs.getPatches(limitDraw, self.slide, self.histColors[i]))
#draw blobs
else:
#draw all blob lists with their own color
if self.drawAllBlobs == True:
for j, blobs in enumerate(self.blobCollection):
ptches.extend(blobs.getPatches(limitDraw, self.slide, GUIConstants.MULTI_BLOB[j]))
#show only the current blob list
else:
ptches.extend(self.blobCollection[self.currentBlobs].getPatches(limitDraw, self.slide,
GUIConstants.MULTI_BLOB[self.currentBlobs]))
#return list of patches as a patch collection, if none match_original must be false
return PatchCollection(ptches, match_original=(len(ptches) != 0))
def getROIPatches(self, newPoint = None, append = False):
ptches = []
tROI = self.blobCollection[self.currentBlobs].getROI(newPoint, GUIConstants.ROI_DIST *2**self.slide.lvl, append)
if len(tROI) > 1:
verts = []
for roi in tROI:
verts.append(self.slide.getLocalPoint(roi))
verts.append(self.slide.getLocalPoint(tROI[0]))
ptches.append(mpl.patches.PathPatch(Path(verts, None),
color = GUIConstants.ROI,
fill = False))
return ptches
def reportROI(self, point, append = False):
'''
Handles ROI additions and removals based on position
point: the point in global coordinates
'''
self.blobCollection[self.currentBlobs].ROI = \
self.blobCollection[self.currentBlobs].getROI(point,
GUIConstants.ROI_DIST *2**self.slide.lvl,
append)
def drawLabels(self, axes):
'''
draw text labels on the supplied axes, assuming the axes are displaying the
slide image and blobs in their current state.
'''
if self.slide is None or self.showPatches == False:
return
#fiducial labels
lineWid = 1 if 6-self.slide.lvl < 1 else 6-self.slide.lvl
#draw fiducial labels, color blended by deviation
if len(self.coordinateMapper.physPoints) > 2:
deviations = self.coordinateMapper.squareErrors()
#scale between 0 and 1
mind = min(deviations)
maxd = max(deviations)
deviations = [ (x - mind) / (maxd - mind) for x in deviations]
else:
deviations = [ 0, 0 ]
good = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL)
bad = mpl.colors.colorConverter.to_rgb(GUIConstants.FIDUCIAL_WORST)
points, inds = self.slide.getPointsInBounds(self.coordinateMapper.pixelPoints)
for i,p in enumerate(points):
#blend color based on deviation
d = deviations[inds[i]]
col = tuple(d * x + (1-d) * y for x,y in zip(bad, good))
axes.text(p[0] + GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
p[1] - GUIConstants.FIDUCIAL_RADIUS/2**self.slide.lvl,
self.coordinateMapper.predictLabel(self.coordinateMapper.physPoints[inds[i]]),
fontsize = lineWid + 6,
fontweight='bold',
color = col,
bbox=dict(facecolor=GUIConstants.FIDUCIAL_LABEL_BKGRD))
#show group labels
#hist blobs have no text
if self.histogramBlobs is not None and len(self.histogramBlobs) != 0:
pass
#normal blobs can have group labels
else:
pass
#show group names of all lists
if self.drawAllBlobs:
for blobs in self.blobCollection:
self._drawBlobLabels(axes, blobs, lineWid)
#show only the current list
else:
self._drawBlobLabels(axes, self.blobCollection[self.currentBlobs], lineWid)
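# A hedged worked example of the deviation-based color blend used above
# (the RGB values here are illustrative, not the real GUIConstants colors).
# With good = (0.0, 1.0, 0.0), bad = (1.0, 0.0, 0.0) and a normalized
# deviation d = 0.25, the blend
#     col = tuple(d*x + (1-d)*y for x, y in zip(bad, good))
# yields (0.25, 0.75, 0.0): mostly the "good" color, shifting linearly
# toward the "bad" color as d approaches 1 (the worst fiducial).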
def _drawBlobLabels(self, axes, blobs, lineWid):
'''
Helper method to draw blob labels onto the provided axis
axes: matplotlib axes to draw text to
blobs: blobList with labels to draw
lineWid: the linewidth to use for drawing
'''
#get grouplabels from blobs
labels = list(blobs.groupLabels.keys())
pos = list(blobs.groupLabels.values())
if len(labels) != 0:
points, inds = self.slide.getPointsInBounds(pos)
for i,p in enumerate(points):
#add offset from normal position
axes.text(p[0]+GUIConstants.DEFAULT_RADIUS/2**self.slide.lvl,
p[1]-GUIConstants.DEFAULT_RADIUS/2**self.slide.lvl,
labels[inds[i]],
fontsize=lineWid+6,
color=GUIConstants.EXPANDED_TEXT)
def reportInfoRequest(self, localPoint):
'''
Handles a request for image/blob information at the supplied local point
localPoint: (x,y) tuple of the query point in the local coordinate space
of the slide image
returns a string description of the point
'''
#nothing to query against
if self.slide is None:
return "No slide loaded"
point = self.slide.getGlobalPoint(localPoint)
#if the histogram canvas is shown, highlight that blob's location
if self.GUI is not None and self.GUI.showHist:
#find blob if user clicked in bounds
if self.blobCollection[self.currentBlobs] is not None and \
self.blobCollection[self.currentBlobs].length() > 0:
points, inds = self.slide.getPointsInBounds(blob.blob.getXYList(self.blobCollection[self.currentBlobs].blobs))
found = False
for i,p in enumerate(points):
#see if click point is within radius
if (localPoint[0]-p[0])**2 + (localPoint[1] - p[1])**2 <= \
(self.blobCollection[self.currentBlobs].blobs[inds[i]].radius/2**self.slide.lvl)**2:
self.GUI.histCanvas.singleBlob = inds[i]
found = True
break
#if not found, set to None
if not found:
self.GUI.histCanvas.singleBlob = None
#get pixel color and alpha (discarded)
try:
r,g,b,a = self.slide.getImg().getpixel(localPoint)
except IndexError:
r,g,b = 0,0,0
#get the size and circ of an area > thresh if on blb view
if self.showThreshold:
area,circ = self.blobCollection[self.currentBlobs].blobFinder.getBlobCharacteristics(localPoint)
return "x = %d, y = %d r,g,b = %d,%d,%d\tArea = %d\tCirc = %.2f"%(point[0], point[1], r, g, b, area, circ)
#get fiducial localization error if in a fiducial
fle = self.coordinateMapper.getFLE(point, GUIConstants.FIDUCIAL_RADIUS)
if fle is not None:
return "x = %d, y = %d r,g,b = %d,%d,%d FLE: %d"%(point[0], point[1], r, g, b, fle)
#show rgb and x,y location
return "x = %d, y = %d r,g,b = %d,%d,%d"%(point[0], point[1], r, g, b)
def reportFiducialRequest(self, localPoint, removePoint, extras = None):
'''
Handles a fiducial request.
localPoint: (x,y) tuple in the image coordinate system
removePoint: boolean toggle. If True, the closest fiducial is removed
extras: a debugging object to bypass GUI display. Must define text and ok
'''
#no slide to register against
if self.slide is None:
return "No slide loaded"
globalPos = self.slide.getGlobalPoint(localPoint)
#shift RMB to remove closest fiducial
if removePoint:
if len(self.coordinateMapper.physPoints) == 0:
return "No points to remove"
self.coordinateMapper.removeClosest(globalPos)
return "Removed fiducial"
#get physical location from user
else:
#mapper returns predicted location
predicted = self.coordinateMapper.predictName(globalPos)
#prompt user
if self.GUI is None and extras is None:
return "No input provided"
if extras is not None:#make this check first for debugging
text = extras.text
ok = extras.ok
elif self.GUI is not None:
text, ok = self.GUI.requestFiducialInput(predicted)
if ok:
#validate entry
if self.coordinateMapper.isValidEntry(text):
#add position to mapper
dev = self.coordinateMapper.addPoints(globalPos,
self.coordinateMapper.extractPoint(text))
if dev is None:
return "%s added at %d,%d" % (text, globalPos[0], globalPos[1])
else:
return "%s added at %d,%d (FLE: %d)" % (text, globalPos[0], globalPos[1], dev)
else:
return "Invalid entry: {}".format(text)
def reportBlobRequest(self, localPoint, radius):
'''
Tries to add the blob to the current blob list.
If it overlaps with an existing blob, that blob is removed instead
localPoint: (x,y) tuple in the image coordinate space
radius: the radius of the new blob to be added
'''
#no slide to add blobs onto
if self.slide is None:
return "No slide loaded"
globalPnt = self.slide.getGlobalPoint(localPoint)
added, removeInd = self.blobCollection[self.currentBlobs].blobRequest(globalPnt, radius)
if added:
if self.GUI is not None and self.GUI.showHist:
self.GUI.toggleHistWindow()
return "Adding blob at {}, {}".format(globalPnt[0], globalPnt[1])
else:
if self.GUI is not None and self.GUI.showHist:
self.GUI.histCanvas.removeBlob(removeInd)
return "Removed blob at {}, {}".format(globalPnt[0], globalPnt[1])
def requestInstrumentMove(self, localPoint):
'''
Handles requests for moving the connected instrument
localPoint: (x,y) tuple in the current image coordinate system
returns a string summarizing the effect of the action
'''
#no slide is set up
if self.slide is None:
return "No slide loaded"
#the connected instrument isn't initialized or present
if self.coordinateMapper.connectedInstrument is None or \
not self.coordinateMapper.connectedInstrument.connected:
return "Instrument not connected"
#perform actual movement
pixelPnt = self.slide.getGlobalPoint(localPoint)
if len(self.coordinateMapper.physPoints) >= 2:
motorPnt = self.coordinateMapper.translate(pixelPnt)
self.coordinateMapper.connectedInstrument.moveToPositionXY(motorPnt)
return "Moving to {:.0f}, {:.0f}".format(motorPnt[0], motorPnt[1])
#not enough registration points
else:
return "Not enough training points" | mit |
llondon6/kerr_public | kerr/mapqnms.py | 1 | 35883 |
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
'''Class for boxes in complex frequency space'''
# The routines of this class assist in the solving and classification of
# QNM solutions
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
class cwbox:
# ************************************************************* #
# This is a class to facilitate the solving of Leaver's equations by varying
# the real and imaginary frequency components, and optimizing over the separation constants.
# ************************************************************* #
def __init__(this,
l,m, # QNM indices
cwr, # Center coordinate of real part
cwc, # Center coordinate of imag part
wid, # box width
hig, # box height
res = 50, # Number of gridpoints in each dimension
parent = None, # Parent of current object
sc = None, # optional holder for separation constant
verbose = False, # be verbose
maxn = None, # Overtones with n>maxn will be actively ignored. NOTE that by convention n>=0.
smallboxes = True, # Toggle for using small boxes for new solutions
**kwargs ):
#
from numpy import array,complex128,meshgrid,float128
#
this.verbose,this.res = verbose,res
# Store QNM indices
this.l,this.m = l,m
# Set box params
this.width,this.height = None,None
this.setboxprops(cwr,cwc,wid,hig,res,sc=sc)
# Initialize a list of children: if a box contains multiple solutions, then it is split according to each solution's location
this.children = [this]
# Point the object to its parent
this.parent = parent
#
this.__jf__ = []
# temp grid of separation constants
this.__scgrid__ = []
# current value of scalarized work-function
this.__lvrfmin__ = None
# Dictionary for high-level data: the data of all of this object's children is collected here
this.data = {}
this.dataformat = '{ ... (l,m,n,tail_flag) : { "jf":[...],"cw":[...],"sc":[...],"lvrfmin":[...] } ... }'
# Dictionary for low-level data: If this object is fundamental, then its data will be stored here in the same format as above
this.__data__ = {}
# QNM label: (l,m,n,t), NOTE that "t" is 0 if the QNM is not a power-law tail and 1 otherwise
this.__label__ = ()
# Counter for the number of times map has been called on this object
this.mapcount = 0
# Default value for temporary separation constant
this.__sc__ = 4.0
# Maximum overtone label allowed. NOTE that by convention n>=0.
this.__maxn__ = maxn
#
this.__removeme__ = False
#
this.__smallboxes__ = smallboxes
#################################################################
'''************************************************************ #
Set box params & separation constant center
# ************************************************************'''
#################################################################
def setboxprops(this,cwr,cwc,wid,hig,res,sc=None,data=None,pec=None):
# import maths and other
from numpy import complex128,float128,array,linspace
import matplotlib.patches as patches
# set props for box geometry
this.center = array([cwr,cwc])
this.__cw__ = cwr + 1j*cwc # Store cw for convenience
# Boxes may only shrink. NOTE that this is useful as some potential or unwanted solutions may be removed, and we want to avoid finding them again. NOTE that this would be nice to implement, but it currently breaks the root finding.
this.width,this.height = float128( abs(wid) ),float128( abs(hig) )
# if (this.width is None) or (this.height is None):
# this.width,this.height = float128( abs(wid) ),float128( abs(hig) )
# else:
# this.width,this.height = min(float128( abs(wid) ),this.width),min(this.height,float128( abs(hig) ))
this.limit = array([this.center[0]-this.width/2.0, # real min
this.center[0]+this.width/2.0, # real max
this.center[1]-this.height/2.0, # imag min
this.center[1]+this.height/2.0]) # imag max
this.wr_range = linspace( this.limit[0], this.limit[1], res )
this.wc_range = linspace( this.limit[2], this.limit[3], res )
# Set patch object for plotting. NOTE the negative sign exists here per convention
if None is pec: pec = 'k'
this.patch = patches.Rectangle( (min(this.limit[0:2]), min(-this.limit[2:4]) ), this.width, this.height, fill=False, edgecolor=pec, alpha=0.4, linestyle='dotted' )
# set holder for separation constant value
if sc is not None:
this.__sc__ = sc
# Initiate the data holder for this box. The data holder will contain lists of spin, official cw and sc values
if data is not None:
this.data=data
#################################################################
'''************************************************************ #
Map the potential solutions in this box
# ************************************************************'''
#################################################################
def map(this,jf):
# Import useful things
from kerr import localmins # finds local minima of a 2D array
from kerr.basics import alert,green,yellow,cyan,bold,magenta,blue
from numpy import array,delete,ones
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
# Add the input jf to the list of jf values. NOTE that this is not the primary recommended list for referencing jf. Please use the "data" field instead.
this.__jf__.append(jf)
#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#%%#
#
if this.verbose:
if this.parent is None:
alert('\n\n# '+'--'*40+' #\n'+blue(bold('Attempting to map qnm solutions for: jf = %1.8f'%(jf)))+'\n# '+'--'*40+' #\n','map')
else:
print '\n# '+'..'*40+' #\n'+blue('jf = %1.8f, label = %s'%(jf,this.__label__))+'\n# '+'..'*40+' #'
# Map solutions using discrete grid
if this.isfundamental():
# Brute-force calculate solutions to leaver's equations
if this.verbose: alert("Solving Leaver's equations over the grid",'map')
this.__x__,this.__scgrid__ = this.lvrgridsolve(jf)
# Use a local-min finder to estimate the qnm locations for the grid of work function values, x
if this.verbose: alert('Searching for local minima. Ignoring mins on boundaries.','map')
this.__localmin__ = localmins(this.__x__,edge_ignore=True)
if this.verbose: alert('Number of local minima found: %s.'%magenta('%i'%(len(array(this.__localmin__)[0]))),'map')
# If needed, split the box into sub-boxes: Give the current box children!
this.splitcenter() # NOTE that if there is only one local min, then no split takes place
# So far the QNM solutions are estimates that have discretization error. Now, we wish to refine the
# solutions using optimization.
if this.verbose: alert('Refining QNM solution locations using a hybrid strategy.','map')
this.refine(jf)
else:
# Map solutions for all children
for child in [ k for k in this.children if this is not k ]:
child.map(jf)
# Collect QNM solution data for this BH spin. NOTE that only non-fundamental objects are curated
if this.verbose: alert('Collecting final QNM solution information ...','map')
this.curate(jf)
# Remove duplicate solutions
this.validatechildren()
#
if this.verbose: alert('Mapping of Kerr QNM with (l,m)=(%i,%i) within box now complete for this box.' % (this.l,this.m ) ,'map')
# Some book-keeping on the number of times this object has been mapped
this.mapcount += 1
# For the given bh spin, collect all QNM frequencies and separation constants within the current box
# NOTE that the outputs are coincident lists
def curate(this,jf):
#
from numpy import arange,array,sign
#
children = this.collectchildren()
cwlist,sclist = [ child.__cw__ for child in children ],[ child.__sc__ for child in children ]
if this.isfundamental():
cwlist.append( this.__cw__ )
sclist.append( this.__sc__ )
# sort the output lists by the imaginary part of the cw values
sbn = lambda k: abs( cwlist[k].imag ) # Sort By Overtone(N)
space = arange( len(cwlist) )
map_ = sorted( space, key=sbn )
std_cwlist = array( [ cwlist[k] for k in map_ ] )
std_sclist = array( [ sclist[k] for k in map_ ] )
# ---------------------------------------------------------- #
# Separate positive, zero and negative frequency solutions
# ---------------------------------------------------------- #
# Solutions with frequencies less than this value will be considered to be power-laws
pltol = 0.01
# Frequencies
sorted_cw_pos = list( std_cwlist[ (sign(std_cwlist.real) == sign(this.m)) * (abs(std_cwlist.real)>pltol) ] )
sorted_cw_neg = list( std_cwlist[ (sign(std_cwlist.real) ==-sign(this.m)) * (abs(std_cwlist.real)>pltol) ] )
sorted_cw_zro = list( std_cwlist[ abs(std_cwlist.real)<=pltol ] )
# Create a dictionary between (cw,sc) and child objects
A,B = {},{}
for child in children:
A[child] = ( child.__cw__, child.__sc__ )
B[ A[child] ] = child
#
def inferlabel( cwsc ):
cw,sc = cwsc[0],cwsc[1]
ll = this.l
if abs(cw.real)<pltol :
# power-law decay
tt = 1
nn = sorted_cw_zro.index( cw )
mm = this.m
else:
tt = 0
if sign(this.m)==sign(cw.real):
# prograde
mm = this.m
nn = sorted_cw_pos.index( cw )
else:
# retrograde
mm = -1 * this.m
nn = sorted_cw_neg.index( cw )
#
return (ll,mm,nn,tt)
# ---------------------------------------------------------- #
# Create a dictionary to keep track of potential solutions
# ---------------------------------------------------------- #
label = {}
for child in children:
cwsc = ( child.__cw__, child.__sc__ )
label[child] = inferlabel( cwsc )
child.__label__ = label[child]
#
this.label = label
'''
IMPORTANT: Here it is assumed that the solutions will change in a continuous manner, and that after the first mapping, no new solutions are of interest, unless a box-split occurs.
'''
# Store the high-level data product
for child in children:
L = this.label[child]
if not L in this.data:
this.data[ L ] = {}
this.data[ L ][ 'jf' ] = [jf]
this.data[ L ][ 'cw' ] = [ child.__cw__ ]
this.data[ L ][ 'sc' ] = [ child.__sc__ ]
this.data[ L ][ 'lvrfmin' ] = [ child.__lvrfmin__ ]
else:
this.data[ L ][ 'jf' ].append(jf)
this.data[ L ][ 'cw' ].append(child.__cw__)
this.data[ L ][ 'sc' ].append(child.__sc__)
this.data[ L ][ 'lvrfmin' ].append(child.__lvrfmin__)
# Store the information to this child also
child.__data__['jf'] = this.data[ L ][ 'jf' ]
child.__data__['cw'] = this.data[ L ][ 'cw' ]
child.__data__['sc'] = this.data[ L ][ 'sc' ]
child.__data__['lvrfmin'] = this.data[ L ][ 'lvrfmin' ]
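# Hedged usage sketch for the curated data product (the (l,m,n,t) key below
# is illustrative; see the dataformat string defined in __init__):
#     jf_list = box.data[(2, 2, 0, 0)]['jf']   # spins mapped so far
#     cw_list = box.data[(2, 2, 0, 0)]['cw']   # complex QNM frequencies
#     sc_list = box.data[(2, 2, 0, 0)]['sc']   # separation constants
# The lists are coincident: cw_list[k] and sc_list[k] belong to jf_list[k].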
# Refine the box center using fminsearch
def refine(this,jf):
# Import useful things
from numpy import complex128,array,linalg,log,exp,abs
from scipy.optimize import fmin,root,fmin_tnc,fmin_slsqp
from kerr.pttools import leaver_workfunction,scberti
from kerr.basics import alert,say,magenta,bold,green,cyan,yellow
from kerr import localmins # finds local minima of a 2D array
#
if this.isfundamental():
# use the box center for refined minimization
CW = complex128( this.center[0] + 1j*this.center[1] )
# SC = this.__sc__
SC = scberti( CW*jf, this.l, this.m )
state = [ CW.real,CW.imag, SC.real,SC.imag ]
#
retrycount,maxretrycount,done = -1,1,False
while done is False:
#
retrycount += 1
#
if retrycount==0:
alert(cyan('* Constructing guess using scberti-grid or extrap.'),'refine')
state = this.guess(jf,gridguess=state)
else:
alert(cyan('* Constructing guess using 4D-grid or extrap.'),'refine')
state = this.guess(jf)
# Solve leaver's equations using a hybrid strategy
cw,sc,this.__lvrfmin__,retry = this.lvrsolve(jf,state)
# If the root finder had some trouble, then mark this box with a warning (for plotting)
done = (not retry) or (retrycount>=maxretrycount)
#
if retry:
newres = 2*this.res
if this.verbose:
msg = yellow( 'The current function value is %s. Retrying root finding (attempt %i) with a higher resolution pre-grid, and brute-force 4D.'%(this.__lvrfmin__, retrycount+2) )
alert(msg,'refine')
# say('Retrying.','refine')
# Increase the resolution of the box
this.setboxprops(this.__cw__.real,this.__cw__.imag,this.width,this.height,newres,sc=this.__sc__)
# NOTE that the commented out code below is deprecated by the use of guess() above.
# # Brute force solve again
# this.__x__,this.__scgrid__ = this.lvrgridsolve(jf,fullopt=True)
# # Use the first local min as a guess
# this.__localmin__ = localmins(this.__x__,edge_ignore=True)
# state = this.grids2states()[0]
# if this.verbose: print X.message+' The final function value is %s'%(this.__lvrfmin__)
if this.verbose: print 'The final function value is '+green(bold('%s'%(this.__lvrfmin__)))
if this.verbose:
print '\n\t Guess cw: %s' % CW
print '\t Optimal cw: %s' % cw
print '\t Approx sc: %s' % scberti( CW*jf, this.l, this.m )
print '\t Guess sc: %s' % (state[2]+1j*state[3])
print '\t Optimal sc: %s\n' % sc
# Set the core properties of the new box
this.setboxprops( cw.real, cw.imag, this.width,this.height,this.res,sc=sc )
# Rescale this object's boxes based on new centers
this.parent.sensescale()
else:
#
for child in [ k for k in this.children if this is not k ]:
child.refine(jf)
# Determine if the current object has more than itself as a child
def isfundamental(this):
return len(this.children) == 1
# ************************************************************* #
# Determine whether to split this box into sub-boxes (i.e. children)
# and if needed, split
# ************************************************************* #
def splitcenter(this):
from numpy import array,zeros,linalg,inf,mean,amax,amin,sqrt
from kerr.basics import magenta,bold,alert,error,red,warning,yellow
mins = this.__localmin__
num_solutions = len(array(mins)[0])
if num_solutions > 1: # Split the box
# for each min
for k in range(len(mins[0])):
# construct the center location
kr = mins[1][k]; wr = this.wr_range[ kr ]
kc = mins[0][k]; wc = this.wc_range[ kc ]
sc = this.__scgrid__[kr,kc]
# Determine the resolution of the new box
res = int( max( 20, 1.5*float(this.res)/num_solutions ) )
# Create the new child. NOTE that the child's dimensions will be set below using a standard method.
child = cwbox( this.l,this.m,wr,wc,0,0, res, parent=this, sc=sc, verbose=this.verbose )
# Add the new box to the current box's child list
this.children.append( child )
# NOTE that here we set the box dimensions of all children using the relative distances between them
this.sensescale()
# Now redefine the box size to contain all children
# NOTE that this step exists only to ensure that the box always contains all of its children's centers
children = this.collectchildren()
wr = array( [ child.center[0] for child in children ] )
wc = array( [ child.center[1] for child in children ] )
width = amax(wr)-amin(wr)
height = amax(wc)-amin(wc)
cwr = mean(wr)
cwc = mean(wc)
this.setboxprops( cwr,cwc,width,height,this.res,sc=sc )
elif num_solutions == 1:
# construct the center location
k = 0 # there should be only one local min
kr = mins[1][k]
kc = mins[0][k]
wr = this.wr_range[ kr ]
wc = this.wc_range[ kc ]
# retrieve associated separation constant
sc = this.__scgrid__[kr,kc]
# Recenter the box on the current min
this.setboxprops(wr,wc,this.width,this.height,this.res,sc=sc)
else:
#
if len(this.__jf__)>3:
alert('Invalid number of local minima found: %s.'% (magenta(bold('%s'%num_solutions))), 'splitcenter' )
# Use the extrapolated values as a guess?
alert(yellow('Now trying to use extrapolation, rather than the grid guess, to center the current box.'),'splitcenter')
#
guess = this.guess(this.__jf__[-1],gridguess=[1.0,1.0,4.0,1.0])
wr,wc,cr,cc = guess[0],guess[1],guess[2],guess[3]
sc = cr+1j*cc
# Recenter the box on the current min
this.setboxprops(wr,wc,this.width,this.height,this.res,sc=sc)
else:
warning('Invalid number of local minima found: %s. This box will be removed. NOTE that this may not be what you want, and further inspection may be warranted.'% (magenta(bold('%s'%num_solutions))), 'splitcenter' )
this.__removeme__ = True
# Validate children: Remove duplicates
def validatechildren(this):
#
from numpy import linalg,array
from kerr import alert,yellow,cyan,blue,magenta
tol = 1e-5
#
if not this.isfundamental():
#
children = this.collectchildren()
initial_count = len(children)
# Remove identical twins
for a,tom in enumerate( children ):
for b,tim in enumerate( children ):
if b>a:
if linalg.norm(array(tom.center)-array(tim.center)) < tol:
tim.parent.children.remove(tim)
del tim
break
# Remove overtones over the max label
if this.__maxn__ is not None:
for k,child in enumerate(this.collectchildren()):
if child.__label__[2] > this.__maxn__:
if this.verbose:
msg = 'Removing overtone '+yellow('%s'%list(child.__label__))+' because its label is higher than the allowed value specified.'
alert(msg,'validatechildren')
this.label.pop( child.__label__ , None)
child.parent.children.remove(child)
del child
# Remove all boxes marked for deletion
for child in this.collectchildren():
if child.__removeme__:
this.label.pop( child.__label__, None )
child.parent.children.remove( child )
del child
#
final_count = len( this.collectchildren() )
#
if this.verbose:
if final_count != initial_count:
alert( yellow('%i children have been removed, and %i remain.') % (-final_count+initial_count,final_count) ,'validatechildren')
else:
alert( 'All children have been deemed valid.', 'validatechildren' )
# Method for collecting all fundamental children
def collectchildren(this,children=None):
#
if children is None:
children = []
#
if this.isfundamental():
children.append(this)
else:
for child in [ k for k in this.children if k is not this ]:
children += child.collectchildren()
#
return children
# Method to plot solutions
def plot(this,fig=None,show=False,showlabel=False):
#
from numpy import array,amin,amax,sign
from matplotlib.pyplot import plot,xlim,ylim,xlabel,ylabel,title,figure,gca,text
from matplotlib.pyplot import show as show_
#
children = this.collectchildren()
wr = array( [ child.center[0] for child in children ] )
wc =-array( [ child.center[1] for child in children ] )
wr_min,wr_max = amin(wr),amax(wr)
wc_min,wc_max = amin(wc),amax(wc)
padscale = 0.15
padr,padc = 1.5*padscale*(wr_max-wr_min), padscale*(wc_max-wc_min)
wr_min -= padr; wr_max += padr
wc_min -= padc; wc_max += padc
#
if fig is None:
# fig = figure( figsize=12*array((wr_max-wr_min, wc_max-wc_min))/(wr_max-wr_min), dpi=200, facecolor='w', edgecolor='k' )
fig = figure( figsize=12.0*array((4.5, 3))/4.0, dpi=200, facecolor='w', edgecolor='k' )
#
xlim( [wr_min,wr_max] )
ylim( [wc_min,wc_max] )
ax = gca()
#
for child in children:
plot( child.center[0],-child.center[1], '+k', ms=10 )
ax.add_patch( child.patch )
if showlabel:
text( child.center[0]+sign(child.center[0])*child.width/2,-(child.center[1]+child.height/2),
'$(%i,%i,%i,%i)$'%(this.label[child]),
ha=('right' if sign(child.center[0])<0 else 'left' ),
fontsize=10,
alpha=0.9 )
#
xlabel(r'$\mathrm{re}\;\tilde\omega_{%i%i}$'%(this.l,this.m))
ylabel(r'-$\mathrm{im}\;\tilde\omega_{%i%i}$'%(this.l,this.m))
title(r'$j_f = %1.6f$'%this.__jf__[-1],fontsize=18)
#
if show: show_()
# ************************************************************* #
# Solve leaver's equations in a given box=[wr_range,wc_range]
# NOTE that the box is a list, not an array
# ************************************************************* #
def lvrgridsolve(this,jf=0,fullopt=False):
# Import maths
from numpy import linalg,complex128,ones,array
from kerr.pttools import scberti
from kerr.pttools import leaver_workfunction
from scipy.optimize import fmin,root
import sys
# Pre-allocate an array that will hold work function values
x = ones( ( this.wc_range.size,this.wr_range.size ) )
# Pre-allocate an array that will hold sep const vals
scgrid = ones( ( this.wc_range.size,this.wr_range.size ), dtype=complex128 ) # NOTE: same shape as x above
# Solve over the grid
for i,wr in enumerate( this.wr_range ):
for j,wc in enumerate( this.wc_range ):
# Construct the complex frequency for this i and j
cw = complex128( wr+1j*wc )
# # Define the intermediate work function to be used for this iteration
# fun = lambda SC: linalg.norm( array(leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )) )
# # For this complex frequency, optimize over separation constant using initial guess
# SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
# SC0 = [SC0_.real,SC0_.imag]
# X = fmin( fun, SC0, disp=False, full_output=True, maxiter=1 )
# # Store work function value
# x[j][i] = X[1]
# # Store sep const vals
# scgrid[j][i] = X[0][0] + 1j*X[0][1]
if fullopt is False:
# Define the intermediate work function to be used for this iteration
fun = lambda SC: linalg.norm( array(leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )) )
# For this complex frequency, optimize over separation constant using initial guess
SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
SC0 = [SC0_.real,SC0_.imag]
# Store work function value
x[j][i] = fun(SC0)
# Store sep const vals
scgrid[j][i] = SC0_
else:
SC0_= scberti( cw*jf, this.l, this.m ) # Use Berti's analytic prediction as a guess
SC0 = [SC0_.real,SC0_.imag,0,0]
#cfun = lambda Y: [ Y[0]+abs(Y[3]), Y[1]+abs(Y[2]) ]
fun = lambda SC:leaver_workfunction( jf,this.l,this.m, [cw.real,cw.imag,SC[0],SC[1]] )
X = root( fun, SC0 )
scgrid[j][i] = X.x[0]+1j*X.x[1]
x[j][i] = linalg.norm( array(X.fun) )
if this.verbose:
sys.stdout.flush()
print '.',
if this.verbose: print 'Done.'
# return work function values AND the optimal separation constants
return x,scgrid
# Convert output of localmin to a state vector for minimization
def grids2states(this):
#
from numpy import complex128
state = []
#
for k in range( len(this.__localmin__[0]) ):
#
kr,kc = this.__localmin__[1][k], this.__localmin__[0][k]
cw = complex128( this.wr_range[kr] + 1j*this.wc_range[kc] )
sc = complex128( this.__scgrid__[kr,kc] )
#
state.append( [cw.real,cw.imag,sc.real,sc.imag] )
#
return state
# Get guess either from local min, or from extrapolation of past data
def guess(this,jf,gridguess=None):
#
from kerr.pttools import leaver_workfunction
from kerr.basics import alert,magenta,apolyfit
from kerr import localmins
from numpy import array,linalg,arange,complex128,allclose,nan
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# Get a guess from the localmin
if gridguess is None:
this.__x__,this.__scgrid__ = this.lvrgridsolve(jf,fullopt=True)
this.__localmin__ = localmins(this.__x__,edge_ignore=True)
guess1 = this.grids2states()[0]
else:
guess1 = gridguess
# Get a guess from extrapolation ( performed in curate() )
guess2 = [ v for v in guess1 ]
if this.mapcount > 3:
# if there are more than three map points, try to use polynomial fitting to determine the state at the current jf value
nn = len(this.__data__['jf'])
order = min(2,nn)
#
xx = array(this.__data__['jf'])[-4:]
#
yy = array(this.__data__['cw'])[-4:]
yr = apolyfit( xx, yy.real, order )(jf)
yc = apolyfit( yy.real, yy.imag, order )(yr)
cw = complex128( yr + 1j*yc )
#
zz = array(this.__data__['sc'])[-4:]
zr = apolyfit( xx, zz.real, order )(jf)
zc = apolyfit( zz.real, zz.imag, order )(zr)
sc = complex128( zr + 1j*zc )
#
guess2 = [ cw.real, cw.imag, sc.real, sc.imag ]
# Determine the best guess
if not ( allclose(guess1,guess2) ):
x1 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess1 ) )
x2 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess2 ) )
alert(magenta('The function value at guess from grid is: %s'%x1),'guess')
alert(magenta('The function value at guess from extrap is: %s'%x2),'guess')
if x2 is nan:
x2 = 100.0*x1
if x1<x2:
guess = guess1
alert(magenta('Using the guess from the grid.'),'guess')
else:
guess = guess2
alert(magenta('Using the guess from extrapolation.'),'guess')
else:
x1 = linalg.norm( leaver_workfunction( jf,this.l,this.m, guess1 ) )
guess = guess1
alert(magenta('The function value at guess from grid is %s'%x1),'guess')
# Return the guess solution
return guess
# Determine whether the current box contains a complex frequency given an iterable whose first two entries are the real and imag part of the complex frequency
def contains(this,guess):
#
cwrmin = min( this.limit[:2] )
cwrmax = max( this.limit[:2] )
cwcmin = min( this.limit[2:] )
cwcmax = max( this.limit[2:] )
#
isin = True
isin = isin and ( guess[0]<cwrmax )
isin = isin and ( guess[0]>cwrmin )
isin = isin and ( guess[1]<cwcmax )
isin = isin and ( guess[1]>cwcmin )
#
return isin
# Try solving the 4D equation near a single guess value [ cw.real cw.imag sc.real sc.imag ]
def lvrsolve(this,jf,guess,tol=1e-8):
# Import Maths
from numpy import log,exp,linalg,array
from scipy.optimize import root,fmin,minimize
from kerr.pttools import leaver_workfunction
from kerr import alert,red
# Try using root
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log( 1.0 + abs(array(leaver_workfunction( jf,this.l,this.m, STATE ))) )
X = root( fun, guess, tol=tol )
cw1,sc1 = X.x[0]+1j*X.x[1], X.x[2]+1j*X.x[3]
__lvrfmin1__ = linalg.norm(array( exp(X.fun)-1.0 ))
retry1 = ( 'not making good progress' in X.message.lower() ) or ( 'error' in X.message.lower() )
# Try using fmin
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log(linalg.norm( leaver_workfunction( jf,this.l,this.m, STATE ) ))
X = fmin( fun, guess, disp=False, full_output=True, ftol=tol )
cw2,sc2 = X[0][0]+1j*X[0][1], X[0][2]+1j*X[0][3]
__lvrfmin2__ = exp(X[1])
retry2 = __lvrfmin2__ > 1e-3 # NOTE: compare against the fmin value just computed, not the stored one
# Use the solution with the smallest work-function value; this also guards against solutions that have wandered significantly from the initial guess
if __lvrfmin1__ < __lvrfmin2__ : # use the fmin value for convenience
cw,sc,retry = cw1,sc1,retry1
__lvrfmin__ = __lvrfmin1__
else:
cw,sc,retry = cw2,sc2,retry2
__lvrfmin__ = __lvrfmin2__
if not this.contains( [cw.real,cw.imag] ):
alert(red('Trial solution found to be outside of box. I will now try to use a bounded solver, but the performance may be suboptimal.'),'lvrsolve')
s = 2.0
cwrmin = min( this.center[0]-this.width/s, this.center[0]+this.width/s )
cwrmax = max( this.center[0]-this.width/s, this.center[0]+this.width/s )
cwcmin = min( this.center[1]-this.height/s, this.center[1]+this.height/s )
cwcmax = max( this.center[1]-this.height/s, this.center[1]+this.height/s )
scrmin = min( this.__sc__.real-this.width/s, this.__sc__.real+this.width/s )
scrmax = max( this.__sc__.real-this.width/s, this.__sc__.real+this.width/s )
sccmin = min( this.__sc__.imag-this.height/s, this.__sc__.imag+this.height/s )
sccmax = max( this.__sc__.imag-this.height/s, this.__sc__.imag+this.height/s )
bounds = [ (cwrmin,cwrmax), (cwcmin,cwcmax), (scrmin,scrmax), (sccmin,sccmax) ]
# Try using minimize
# Define the intermediate work function to be used for this iteration
fun = lambda STATE: log(linalg.norm( leaver_workfunction( jf,this.l,this.m, STATE ) ))
X = minimize( fun, guess, options={'disp':False}, tol=tol, bounds=bounds )
cw,sc = X.x[0]+1j*X.x[1], X.x[2]+1j*X.x[3]
__lvrfmin__ = exp(X.fun)
# Always retry if the solution is outside of the box
if not this.contains( [cw.real,cw.imag] ):
retry = True
alert(red('Retrying because the trial solution is outside of the box.'),'lvrsolve')
# Retry if fval is too large
if __lvrfmin__ > 1e-3:
retry = True
alert(red('Retrying because the trial fmin value is greater than 1e-3.'),'lvrsolve')
# Don't retry if fval is small
if retry and (__lvrfmin__ < 1e-4):
retry = False
alert(red('Not retrying because the fmin value is low.'),'lvrsolve')
# Return the solution
return cw,sc,__lvrfmin__,retry
# Given a box's children, resize the boxes relative to child locations so that no boxes overlap
def sensescale(this):
#
from numpy import array,inf,linalg,sqrt
from kerr import alert
#
children = this.collectchildren()
# Let my people know.
if this.verbose:
alert('Sensing the scale of the current object\'s sub-boxes.','sensescale')
# Determine the distance between this min, and its closest neighbor
scalar = sqrt(2) if (not this.__smallboxes__) else 2.0*sqrt(2.0)
for tom in children:
d = inf
for jerry in [ kid for kid in children if kid is not tom ]:
r = array(tom.center)
r_= array(jerry.center)
d_= linalg.norm(r_-r)
if d_ < d:
d = d_
# Use the smallest distance found to determine a box size
s = d/scalar
width = s; height = s; res = int( max( 20, 1.5*float(this.res)/len(children) ) ) if (len(children)>1) else this.res
# Define the new box size for this child
tom.setboxprops( tom.center[0], tom.center[1], width, height, res )
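# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
# Hedged demonstration of the intended driver loop. This is a sketch only:
# the initial box center, size and spin values below are assumptions chosen
# near the Schwarzschild (l,m)=(2,2) frequency, not values taken from the
# package itself.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #
if __name__ == '__main__':
    from numpy import linspace
    # Start with a box centered near M*w ~ 0.37 - 0.09j and let the class
    # split and refine it as solutions are tracked in spin.
    box = cwbox( 2, 2, 0.37, -0.09, 0.6, 0.6, res=50, verbose=True, maxn=2 )
    for jf in linspace( 0.0, 0.9, 10 ):
        box.map( jf )
    # Plot the solution boxes and labels for the last spin value
    box.plot( show=True, showlabel=True )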
| mit |
zihua/scikit-learn | sklearn/metrics/classification.py | 3 | 71852 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
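# Hedged illustration of the coercion performed by _check_targets
# (doctest-style sketch for the private helper above):
# >>> _check_targets([0, 1, 1], [1, 1, 1])[0]
# 'binary'
# >>> _check_targets([1, 0, 2], [1, 0, 1])[0]  # binary + multiclass
# 'multiclass'
# >>> _check_targets([0, 1, 2], [0.5, 1.1, 2.0])  # doctest: +SKIP
# ValueError: Can't handle mix of multiclass and continuous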
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
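# Hedged worked example for _weighted_sum (illustrative values): with
# sample_score = np.array([1, 0, 1]) and sample_weight = np.array([.5, 1., 2.]),
# normalize=True gives np.average = (1*.5 + 0*1. + 1*2.) / 3.5 = 0.714...,
# normalize=False gives np.dot = 2.5, and with sample_weight=None and
# normalize=False the plain sum, 2, is returned.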
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
Weighting type to calculate the score. None means unweighted;
"linear" means linear weighting; "quadratic" means quadratic weighting.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
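# Hedged worked example for cohen_kappa_score (computed by hand):
# >>> cohen_kappa_score([0, 0, 1, 1], [0, 1, 1, 1])  # doctest: +SKIP
# 0.5
# The observed agreement is p_o = 3/4; the annotators' marginals are
# (0.5, 0.5) and (0.25, 0.75), so p_e = 0.5*0.25 + 0.5*0.75 = 0.5 and
# kappa = (0.75 - 0.5) / (1 - 0.5) = 0.5.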
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
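# Hedged walkthrough of the multilabel branch above: for
# y_true = np.array([[0, 1], [1, 1]]) and y_pred = np.ones((2, 2)),
# sample 1 has |intersection| = 1 and |union| = 2 (score 0.5), while
# sample 2 has |intersection| = |union| = 2 (score 1.0), so the averaged
# Jaccard similarity is (0.5 + 1.0) / 2 = 0.75, matching the docstring.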
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 and inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
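# Hedged numerical check of the docstring example above: after label
# encoding, y_true = [1, 1, 1, 0] and y_pred = [1, 0, 1, 1], so both means
# are 0.75, cov_ytyp = -0.0625 and var_yt = var_yp = 0.1875, giving
# mcc = -0.0625 / sqrt(0.1875 * 0.1875) = -1/3, i.e. the -0.33... above.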
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
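# Hedged walkthrough of the macro-averaged example above: for
# y_true = [0, 1, 2, 0, 1, 2] and y_pred = [0, 2, 1, 0, 0, 1], class 0 has
# precision 2/3 and recall 1 (F1 = 0.8), while classes 1 and 2 have no
# correct predictions (F1 = 0), so the macro average is (0.8 + 0 + 0) / 3
# = 0.26...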
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter *labels* improved for multiclass problems.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
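# Illustrative sketch, not part of scikit-learn: the closed form of the beta
# weighting described above. With beta -> 0 the score approaches precision;
# with large beta it approaches recall. The helper name is hypothetical.
def _fbeta_from_pr(precision, recall, beta):
    """F-beta from precision and recall."""
    beta2 = beta ** 2
    return (1 + beta2) * precision * recall / (beta2 * precision + recall)
# e.g. _fbeta_from_pr(0.5, 1.0, 0.1) ~= 0.50 (precision-dominated), while
# _fbeta_from_pr(0.5, 1.0, 10.0) ~= 0.99 (recall-dominated).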
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
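# Illustrative sketch, not part of scikit-learn: with the errstate suppressed,
# a zero denominator yields nan, which _prf_divide resets to 0.0 before
# issuing the warning. The demo function is hypothetical.
def _prf_divide_demo():
    num, den = np.array([2., 0.]), np.array([4., 0.])
    with np.errstate(divide='ignore', invalid='ignore'):
        # Returns array([0.5, 0.]) and warns that precision is ill-defined.
        return _prf_divide(num, den, 'precision', 'predicted',
                           None, ('precision',))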
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and the targets are binary, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide!
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pred
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
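# Illustrative sketch, not part of scikit-learn: micro averaging pools the
# per-label counts before dividing, while macro averaging divides first and
# then takes an unweighted mean. The counts below mirror the docstring
# example; the demo function is hypothetical.
def _micro_macro_demo():
    tp = np.array([2., 0., 0.])    # per-label true positives
    pred = np.array([3., 2., 1.])  # per-label predicted positives
    micro_precision = tp.sum() / pred.sum()  # 2 / 6 ~= 0.33
    macro_precision = np.mean(tp / pred)     # mean(2/3, 0, 0) ~= 0.22
    return micro_precision, macro_precision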
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter *labels* improved for multiclass problems.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
Parameter *labels* improved for multiclass problems.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
target_names = ['%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
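# Illustrative sketch, not part of scikit-learn: the 'avg / total' row of the
# report is the support-weighted average of the per-class columns. The values
# below reproduce the docstring example; the demo function is hypothetical.
def _report_averages_demo():
    p = np.array([0.5, 0.0, 1.0])       # per-class precision
    r = np.array([1.0, 0.0, 2 / 3.0])   # per-class recall
    f1 = np.array([2 / 3.0, 0.0, 0.8])  # per-class F1
    s = np.array([1, 1, 3])             # support
    return (np.average(p, weights=s),   # ~0.70
            np.average(r, weights=s),   # ~0.60
            np.average(f1, weights=s))  # ~0.61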
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
classes : array, shape = [n_labels], optional
(deprecated) Integer array of labels. This parameter has been
renamed to ``labels`` in version 0.18 and will be removed in 0.20.
Returns
-------
loss : float or int
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred``, which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. Hamming loss is more forgiving in that it penalizes the individual
labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
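# Illustrative sketch, not part of scikit-learn: the binary log loss formula
# from the docstring, with the same eps clipping as above. The helper name is
# hypothetical.
def _binary_log_loss(y_true, y_prob, eps=1e-15):
    y_true = np.asarray(y_true, dtype=float)
    y_prob = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    return float(np.mean(-(y_true * np.log(y_prob) +
                           (1 - y_true) * np.log(1 - y_prob))))
# _binary_log_loss([1, 0, 0, 1], [.9, .1, .2, .65]) ~= 0.21616, matching the
# doctest above with "spam" taken as the positive class.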
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In the binary case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound on the number of mistakes made by the classifier.
In the multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound on the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
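# Illustrative sketch, not part of scikit-learn: with +1/-1 labels the binary
# hinge loss is mean(max(0, 1 - y * decision)), which the branch above
# implements. The helper name is hypothetical.
def _binary_hinge(y_true, pred_decision):
    margins = np.asarray(y_true) * np.asarray(pred_decision)
    return float(np.mean(np.maximum(0.0, 1.0 - margins)))
# _binary_hinge([-1, 1, 1], [-2.18, 2.36, 0.09]) ~= 0.30, matching the
# doctest above.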
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
the positive class.
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
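# Illustrative sketch, not part of scikit-learn: unweighted, the Brier score
# is the mean squared error between the 0/1 outcomes and the predicted
# probabilities. The demo function is hypothetical.
def _brier_demo():
    y_true = np.array([0., 1., 1., 0.])
    y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    return np.mean((y_true - y_prob) ** 2)  # (.01+.01+.04+.09)/4 = 0.0375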
| bsd-3-clause |
rabipanda/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes([axis] + list(
labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(
array_ops.gather(transposed.tensor, indexer), temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types,
tc.Union(slice, collections.Hashable, list)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
# Needs to be after checking for slices, since slice objects claim to be
# instances of collections.Hashable but hash() on them fails.
elif isinstance(value, collections.Hashable):
slices[axis_name] = axis.index(value)
elif isinstance(value, list):
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value]
indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
else:
# If type checking is working properly, this shouldn't be possible.
raise TypeError('cannot handle arbitrary types')
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
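# Illustrative sketch, not part of TensorFlow: selecting a single label drops
# the selected axis. This assumes LabeledTensor accepts (name, labels) tuples
# as axes; the demo function is hypothetical.
def _select_demo():
  lt = core.LabeledTensor(array_ops.ones((2, 3)),
                          [('batch', ['a', 'b']), ('channel', 3)])
  return select(lt, {'batch': 'a'})  # axes: only ('channel', 3) remains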
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
ValueError: If no input tensors are provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
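# Illustrative sketch, not part of TensorFlow: concatenating two labeled
# tensors along a shared axis sums the sizes of that axis. This assumes
# LabeledTensor accepts (name, size) tuples as axes; the demo function is
# hypothetical.
def _concat_demo():
  a = core.LabeledTensor(array_ops.ones((2, 3)),
                         [('batch', 2), ('channel', 3)])
  b = core.LabeledTensor(array_ops.zeros((4, 3)),
                         [('batch', 4), ('channel', 3)])
  return concat([a, b], 'batch')  # axes: ('batch', 6), ('channel', 3)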
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
ValueError: If no input tensors are provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
if len(labeled_tensors) < 1:
raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
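# Illustrative sketch, not part of TensorFlow: unpack followed by pack along
# the same leading axis reconstructs a tensor with the input's shape and
# axes. The demo function is hypothetical.
def _pack_unpack_demo(labeled_tensor):
  pieces = unpack(labeled_tensor)  # one LabeledTensor per leading-axis entry
  first_axis = list(labeled_tensor.axes.values())[0]
  return pack(pieces, first_axis)  # same shape and axes as the input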
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
# produced with the wrong order. (The latter will occur with some frequency
# because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
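# Illustrative sketch, not part of TensorFlow: merging two adjacent axes into
# one. 'row' and 'col' form a valid slice of the axis names, so no transpose
# is needed. Axis names and sizes are hypothetical.
def _reshape_demo():
  lt = core.LabeledTensor(array_ops.ones((2, 3, 4)),
                          [('batch', 2), ('row', 3), ('col', 4)])
  return reshape(lt, ['row', 'col'], [('pixel', 12)])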
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [
core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(
labeled_tensor.tensor, shape, seed=seed, name=scope)
return core.LabeledTensor(crop_op, axes)
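# Hedged usage sketch (added for illustration; not part of the original
# module): randomly crop the spatial axes of an image-like tensor while
# leaving 'channel' untouched.
def _example_random_crop():
  image = core.LabeledTensor(array_ops.ones((10, 10, 3)), ['x', 'y', 'channel'])
  # The cropped axes keep their names but lose any tick labels.
  return random_crop(image, {'x': 4, 'y': 4})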
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
# TODO(ericmc): Fix this upstream.
if labeled_tensor.dtype == dtypes.string:
# We must construct the full graph here, because functional_ops.map_fn
# doesn't work for string-valued tensors.
# Constructing the full graph may be slow.
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
else:
# Figure out what the axis labels should be, but use tf.map_fn to
# construct the graph because it's efficient.
# It may be slow to construct the full graph, so we infer the labels from
# the first element.
# TODO(ericmc): This builds a subgraph which then gets thrown away.
# Find a more elegant solution.
first_map_lt = fn(unpack_lts[0])
final_axes = list(labeled_tensor.axes.values())[:1] + list(
first_map_lt.axes.values())
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor)
def tf_fn(tensor):
original_axes = list(labeled_tensor.axes.values())[1:]
tensor_lt = core.LabeledTensor(tensor, original_axes)
return fn(tensor_lt).tensor
map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
map_lt = core.LabeledTensor(map_op, final_axes)
return core.identity(map_lt, name=scope)
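# Hedged usage sketch (added for illustration; not part of the original
# module): double every element along the first axis; map_fn unpacks on the
# leading axis and packs the results back together.
def _example_map_fn():
  lt_in = core.LabeledTensor(array_ops.ones((2, 3)), ['batch', 'channel'])
  double = lambda t: core.LabeledTensor(t.tensor * 2, list(t.axes.values()))
  return map_fn(double, lt_in)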
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
"""Left fold on the list of tensors unpacked from labeled_tensor.
See tf.foldl.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
Its arguments are (accumulated_value, next_value).
labeled_tensor: The input tensor.
initial_value: The initial value of the accumulator.
name: Optional op name.
Returns:
The accumulated value.
"""
with ops.name_scope(name, 'lt_foldl',
[labeled_tensor, initial_value]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
initial_value = core.convert_to_labeled_tensor(initial_value)
@tc.returns(ops.Tensor)
@tc.accepts(ops.Tensor, ops.Tensor)
def tf_fn(accumulator, next_element):
accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
next_element_lt = core.LabeledTensor(
next_element, list(labeled_tensor.axes.values())[1:])
return fn(accumulator_lt, next_element_lt).tensor
foldl_op = functional_ops.foldl(
tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
return core.identity(foldl_lt, name=scope)
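# Hedged usage sketch (added for illustration; not part of the original
# module): a running sum over the leading 'batch' axis, accumulated into a
# 'channel'-shaped initial value.
def _example_foldl():
  lt_in = core.LabeledTensor(array_ops.ones((4, 2)), ['batch', 'channel'])
  init = core.LabeledTensor(array_ops.zeros((2,)), ['channel'])
  add = lambda acc, nxt: core.LabeledTensor(acc.tensor + nxt.tensor,
                                            list(acc.axes.values()))
  return foldl(add, lt_in, init)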
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(
labeled_tensor.tensor, squeeze_dimensions, name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
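# Hedged usage sketch (added for illustration; not part of the original
# module): remove a named size-1 axis; the remaining axes are unchanged.
def _example_squeeze():
  lt_in = core.LabeledTensor(array_ops.ones((3, 1)), ['row', 'dummy'])
  return squeeze(lt_in, ['dummy'])  # axes: ['row']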
# pylint: disable=invalid-name
ReduceAxis = tc.Union(string_types,
tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number of shared dimensions (via an
  `axis` argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered
    by the current axis_order_scope, if set, or in order of appearance on the
    inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [
name for name in axis_scope_order if name in result_axis_names
]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(
a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
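# Hedged usage sketch (added for illustration; not part of the original
# module): a matrix-vector product that sums over the shared 'in' axis,
# mirroring the numpy.dot semantics described in the docstring above.
def _example_matmul():
  weights = core.LabeledTensor(array_ops.ones((2, 3)), ['out', 'in'])
  vec = core.LabeledTensor(array_ops.ones((3,)), ['in'])
  return matmul(weights, vec)  # axes: ['out'], shape (2,)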
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(
labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
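# Hedged usage sketch (added for illustration; not part of the original
# module): reducing over a named axis removes it, so no dimension
# bookkeeping is needed at the call site.
def _example_reduce_sum():
  lt_in = core.LabeledTensor(array_ops.ones((2, 3)), ['x', 'y'])
  return reduce_sum(lt_in, 'y')  # axes: ['x'], shape (2,)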
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes whose
      multiple differs from 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [
name for name in multiples
if labeled_tensor.axes[name].labels is not None
]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [
axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()
]
return core.LabeledTensor(tile_op, new_axes)
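# Hedged usage sketch (added for illustration; not part of the original
# module): only unlabeled axes may be tiled, so 'batch' below must carry no
# tick labels.
def _example_tile():
  lt_in = core.LabeledTensor(array_ops.ones((2, 3)), ['batch', 'channel'])
  return tile(lt_in, {'batch': 4})  # 'batch' grows from 2 to 8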
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(labeled_tensor.tensor,
padding_pairs,
mode,
name=scope)
return core.LabeledTensor(pad_op, new_axes)
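# Hedged usage sketch (added for illustration; not part of the original
# module): pad one element before and two after along 'x', growing it from
# size 3 to size 6.
def _example_pad():
  lt_in = core.LabeledTensor(array_ops.ones((3,)), ['x'])
  return pad(lt_in, {'x': (1, 2)})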
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(
tc.Union(core.Axes, tc.Collection(
tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The labeled tensor with the given constant value.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
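# Hedged usage sketch (added for illustration; not part of the original
# module): when all axes are given as (name, size) pairs, the shape is
# passed straight to tf.constant and the value is broadcast to it.
def _example_constant():
  return constant(0.0, axes=[('x', 2), ('y', 3)])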
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
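# Hedged usage sketch (added for illustration; not part of the original
# module): cast and the *_like constructors all preserve the input's axes,
# changing only dtype or values.
def _example_cast():
  lt_in = ones_like(core.LabeledTensor(array_ops.ones((2,)), ['x']))
  return cast(lt_in, dtypes.int32)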
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(
labeled_tensor.tensor, msg=message, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
    mask: A 1-D boolean LabeledTensor whose single axis matches the first
      axis of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the first axis of the mask does not match the first axis
      of `labeled_tensor`.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
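# Hedged usage sketch (added for illustration; not part of the original
# module): keep only the rows where the 1-D mask is True; the first axis
# keeps its name but, as documented above, loses its labels.
def _example_boolean_mask():
  values = core.LabeledTensor(array_ops.ones((3, 2)), ['batch', 'channel'])
  mask = core.LabeledTensor(ops.convert_to_tensor([True, False, True]),
                            ['batch'])
  return boolean_mask(values, mask)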
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
    ValueError: if `condition`, `x`, and `y` do not have identical axes.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
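# Hedged usage sketch (added for illustration; not part of the original
# module): elementwise selection between two tensors with identical axes.
def _example_where():
  cond = core.LabeledTensor(ops.convert_to_tensor([True, False]), ['x'])
  ones_lt = core.LabeledTensor(array_ops.ones((2,)), ['x'])
  return where(cond, ones_lt, zeros_like(ones_lt))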
| apache-2.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tseries/tests/test_period.py | 9 | 153010 | """Test suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from datetime import datetime, date, timedelta
from numpy.ma.testutils import assert_equal
from pandas import Timestamp
from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map
from pandas.tseries.period import Period, PeriodIndex, period_range
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
import pandas.tseries.offsets as offsets
import pandas.core.datetools as datetools
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip
from pandas import Series, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
class TestPeriodProperties(tm.TestCase):
"Test properties such as year, month, weekday, etc...."
#
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 4)
p = Period(ordinal=-2, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 3)
p = Period(ordinal=-2, freq='M')
self.assertEqual(p.year, 1969)
self.assertEqual(p.month, 11)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
self.assertIn('1989Q3', str(exp))
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
stamp = exp.to_timestamp('3D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
self.assertEqual(p, exp + 1)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
self.assertEqual(result, expected)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'M')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period('nat', freq='W-SUN')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'W-SUN')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period(tslib.iNaT, freq='D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'D')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
p = Period(tslib.iNaT, freq='3D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, offsets.Day(3))
self.assertEqual(p.freqstr, '3D')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertRaises(ValueError, Period, 'NaT')
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
self.assertEqual(p1.ordinal, p2.ordinal)
self.assertEqual(p1.freq, offsets.MonthEnd(3))
self.assertEqual(p1.freqstr, '3M')
self.assertEqual(p2.freq, offsets.MonthEnd())
self.assertEqual(p2.freqstr, 'M')
result = p1 + 1
self.assertEqual(result.ordinal, (p2 + 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
result = p1 - 1
self.assertEqual(result.ordinal, (p2 - 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -3M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='-3M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='0M')
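    # Hedged illustration (added for this write-up; not part of the original
    # suite): one arithmetic step on a '3M' Period advances by the full
    # three-month span, matching three steps at monthly frequency.
    def _sketch_period_mult(self):
        p = pd.Period('2011-01', freq='3M')
        self.assertEqual((p + 1).ordinal,
                         (pd.Period('2011-01', freq='M') + 3).ordinal)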
def test_timestamp_tz_arg(self):
tm._skip_if_no_pytz()
import pytz
for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='3H').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case)
exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil(self):
from pandas.tslib import _dateutil_gettz as gettz
from pandas.tslib import maybe_get_tz
for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo',
'dateutil/US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
self.assertEqual(p.tz, gettz('Europe/Brussels'))
def test_timestamp_nat_tz(self):
t = Period('NaT', freq='M').to_timestamp()
self.assertTrue(t is tslib.NaT)
t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo')
self.assertTrue(t is tslib.NaT)
def test_timestamp_mult(self):
p = pd.Period('2011-01', freq='M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-01-31'))
p = pd.Period('2011-01', freq='3M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-03-31'))
def test_timestamp_nat_mult(self):
for freq in ['M', '3M']:
p = pd.Period('NaT', freq=freq)
self.assertTrue(p.to_timestamp(how='S') is pd.NaT)
self.assertTrue(p.to_timestamp(how='E') is pd.NaT)
def test_period_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEqual(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEqual(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/11/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/12/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEqual(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEqual(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEqual(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEqual(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i1 = Period('05Q1')
self.assertEqual(i1, i2)
lower = Period('05q1')
self.assertEqual(i1, lower)
i1 = Period('1Q2005')
self.assertEqual(i1, i2)
lower = Period('1q2005')
self.assertEqual(i1, lower)
i1 = Period('1Q05')
self.assertEqual(i1, i2)
lower = Period('1q05')
self.assertEqual(i1, lower)
i1 = Period('4Q1984')
self.assertEqual(i1.year, 1984)
lower = Period('4q1984')
self.assertEqual(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEqual(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEqual(i1, i2)
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period(200701, freq='M')
self.assertEqual(i1, expected)
i1 = Period(ordinal=200701, freq='M')
self.assertEqual(i1.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_period_constructor_offsets(self):
self.assertEqual(Period('1/1/2005', freq=offsets.MonthEnd()),
Period('1/1/2005', freq='M'))
self.assertEqual(Period('2005', freq=offsets.YearEnd()),
Period('2005', freq='A'))
self.assertEqual(Period('2005', freq=offsets.MonthEnd()),
Period('2005', freq='M'))
self.assertEqual(Period('3/10/12', freq=offsets.BusinessDay()),
Period('3/10/12', freq='B'))
self.assertEqual(Period('3/10/12', freq=offsets.Day()),
Period('3/10/12', freq='D'))
self.assertEqual(Period(year=2005, quarter=1,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=1, freq='Q'))
self.assertEqual(Period(year=2005, quarter=2,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=2, freq='Q'))
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day()),
Period(year=2005, month=3, day=1, freq='D'))
self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay()),
Period(year=2012, month=3, day=10, freq='B'))
expected = Period('2005-03-01', freq='3D')
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day(3)),
expected)
self.assertEqual(Period(year=2005, month=3, day=1, freq='3D'),
expected)
self.assertEqual(Period(year=2012, month=3, day=10, freq=offsets.BDay(3)),
Period(year=2012, month=3, day=10, freq='3B'))
self.assertEqual(Period(200701, freq=offsets.MonthEnd()),
Period(200701, freq='M'))
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1.year, 18695)
self.assertEqual(i2.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np.datetime64('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np.datetime64('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np.datetime64('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assertEqual(i1.freq, offsets.Minute())
self.assertEqual(i1.freqstr, 'T')
def test_repr(self):
p = Period('Jan-2000')
self.assertIn('2000-01', repr(p))
p = Period('2000-12-15')
self.assertIn('2000-12-15', repr(p))
def test_repr_nat(self):
p = Period('nat', freq='M')
self.assertIn(repr(tslib.NaT), repr(p))
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
tm.assertIsInstance(res, compat.text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
self.assertEqual(result, 4)
self.assertRaises(ValueError, left.__sub__,
Period('2007-01', freq='M'))
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEqual(start_ts, p.to_timestamp('D', how=a))
            # freq with mult should not affect the result
self.assertEqual(start_ts, p.to_timestamp('3D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEqual(end_ts, p.to_timestamp('D', how=a))
self.assertEqual(end_ts, p.to_timestamp('3D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
self.assertEqual(result, p)
self.assertEqual(p.start_time, p.to_timestamp(how='S'))
self.assertEqual(p.end_time, _ex(p))
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
self.assertEqual(result, expected)
result = p.to_timestamp('2T', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
self.assertEqual(result, expected)
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('S', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('5S', how='start')
self.assertEqual(result, expected)
p = Period('NaT', freq='W')
self.assertTrue(p.to_timestamp() is tslib.NaT)
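    # Hedged illustration (added for this write-up; not part of the original
    # suite): an annual Period maps to daily start/end Timestamps via
    # to_timestamp with how='S' / how='E'.
    def _sketch_to_timestamp(self):
        p = Period('1982', freq='A')
        self.assertEqual(p.to_timestamp('D', how='S'), Timestamp('1982-01-01'))
        self.assertEqual(p.to_timestamp('D', how='E'), Timestamp('1982-12-31'))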
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
self.assertEqual(p.start_time, xp)
self.assertEqual(Period('2012', freq='B').start_time,
datetime(2012, 1, 2))
self.assertEqual(Period('2012', freq='W').start_time,
datetime(2011, 12, 26))
p = Period('NaT', freq='W')
self.assertTrue(p.start_time is tslib.NaT)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
self.assertEqual(xp, p.end_time)
xp = _ex(2012, 1, 2)
p = Period('2012', freq='D')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 1, 1)
p = Period('2012', freq='H')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 3)
self.assertEqual(Period('2012', freq='B').end_time, xp)
xp = _ex(2012, 1, 2)
self.assertEqual(Period('2012', freq='W').end_time, xp)
p = Period('NaT', freq='W')
self.assertTrue(p.end_time is tslib.NaT)
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
self.assertEqual(p.end_time, xp)
def test_properties_annually(self):
        # Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
assert_equal(Period(freq='W', year=2012, month=2, day=1).days_in_month, 29)
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
with tm.assert_produces_warning(FutureWarning):
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
with tm.assert_produces_warning(FutureWarning):
exp = Period(freq='WK', year=2012, month=2, day=1)
assert_equal(exp.days_in_month, 29)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.dayofyear, 1)
assert_equal(b_date.days_in_month, 31)
assert_equal(Period(freq='B', year=2012, month=2, day=1).days_in_month, 29)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.dayofyear, 1)
assert_equal(d_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2,
day=1).days_in_month, 29)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.dayofyear, 1)
assert_equal(h_date.hour, 0)
assert_equal(h_date.days_in_month, 31)
assert_equal(Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month, 29)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.dayofyear, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
assert_equal(t_date.days_in_month, 31)
        assert_equal(Period(freq='Min', year=2012, month=2, day=1, hour=0,
                            minute=0).days_in_month, 29)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.dayofyear, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
assert_equal(s_date.days_in_month, 31)
        assert_equal(Period(freq='S', year=2012, month=2, day=1, hour=0,
                            minute=0, second=0).days_in_month, 29)
def test_properties_nat(self):
p_nat = Period('NaT', freq='M')
t_nat = pd.Timestamp('NaT')
# confirm Period('NaT') work identical with Timestamp('NaT')
for f in ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'dayofyear', 'quarter', 'days_in_month']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
self.assertTrue(np.isnan(getattr(t_nat, f)))
for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
def test_pnow(self):
dt = datetime.now()
val = period.pnow('D')
exp = Period(dt, freq='D')
self.assertEqual(val, exp)
val2 = period.pnow('2D')
exp2 = Period(dt, freq='2D')
self.assertEqual(val2, exp2)
self.assertEqual(val.ordinal, val2.ordinal)
self.assertEqual(val.ordinal, exp2.ordinal)
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
self.assertEqual(Period(year=2007, month=1, freq='2M'), expected)
self.assertRaises(ValueError, Period, datetime.now())
self.assertRaises(ValueError, Period, datetime.now().date())
self.assertRaises(ValueError, Period, 1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D')
self.assertRaises(ValueError, Period)
self.assertRaises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
self.assertEqual(result, exp)
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
self.assertEqual(p.freq, 'D')
p = Period('2007-01-01 07')
self.assertEqual(p.freq, 'H')
p = Period('2007-01-01 07:10')
self.assertEqual(p.freq, 'T')
p = Period('2007-01-01 07:10:15')
self.assertEqual(p.freq, 'S')
p = Period('2007-01-01 07:10:15.123')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123000')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123400')
self.assertEqual(p.freq, 'U')
def test_asfreq_MS(self):
initial = Period("2013")
self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M'))
self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S")
tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS')
self.assertTrue(_period_code_map.get("MS") is None)
def noWrap(item):
return item
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('W'), ival_W)
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
with tm.assert_produces_warning(FutureWarning):
ival_W = Period(freq='WK', year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning):
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
with tm.assert_produces_warning(FutureWarning):
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
with tm.assert_produces_warning(FutureWarning):
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
with tm.assert_produces_warning(FutureWarning):
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
with tm.assert_produces_warning(FutureWarning):
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
with tm.assert_produces_warning(FutureWarning):
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
with tm.assert_produces_warning(FutureWarning):
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
with tm.assert_produces_warning(FutureWarning):
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
with tm.assert_produces_warning(FutureWarning):
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('W'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
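# With an anchored quarterly frequency such as Q-JAN, quarters end in
# January, April, July and October, so 2007-01-01 falls in the fourth
# quarter of the fiscal year labelled 2007.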
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
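# Weekend days have no business-day counterpart, so how='S' snaps back to
# the preceding Friday while how='E' rolls forward to the following Monday.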
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('W'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('W'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
# frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('W'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('W'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
def test_asfreq_nat(self):
p = Period('NaT', freq='A')
result = p.asfreq('M')
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq='A', year=2007)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# mult freq to normal freq
p = Period(freq='3A', year=2007)
# ordinal will change because how=E is the default
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq)
expected = Period('2009', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
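# Worked example of the above: Period('2007', freq='3A') spans 2007-2009,
# so converting to plain 'A' lands on 2009 with the default how='E' and
# on 2007 with how='S'.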
p = Period(freq='A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2007-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='3A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2009-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
def test_asfreq_mult_nat(self):
# NaT keeps its ordinal (iNaT) under any frequency conversion
for p in [Period('NaT', freq='A'), Period('NaT', freq='3A'),
Period('NaT', freq='2M'), Period('NaT', freq='3D')]:
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('NaT', freq='3A')
self.assertEqual(result.ordinal, pd.tslib.iNaT)
self.assertEqual(result.freq, expected.freq)
result = p.asfreq(freq, how='S')
expected = Period('NaT', freq='3A')
self.assertEqual(result.ordinal, pd.tslib.iNaT)
self.assertEqual(result.freq, expected.freq)
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assertIsInstance(series, Series)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_numpy_array_equal(result, idx.values)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
self.assertTrue(index.equals(expected))
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
self.assertTrue(index.equals(expected))
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
self.assertTrue(index.equals(expected))
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
self.assertTrue(idx.equals(exp))
def test_constructor_U(self):
# 'U' was used as an undefined period; an unknown freq such as 'X' must raise
self.assertRaises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_numpy_array_equal(pindex.year, years)
self.assert_numpy_array_equal(pindex.quarter, quarters)
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
self.assertTrue(result.equals(exp))
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
self.assertRaises(ValueError, PeriodIndex, idx.values)
self.assertRaises(ValueError, PeriodIndex, list(idx.values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx)
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='M')
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq=offsets.MonthEnd())
self.assertTrue(result.equals(idx))
self.assertTrue(result.freq, 'M')
result = PeriodIndex(idx, freq='2M')
self.assertTrue(result.equals(idx))
self.assertTrue(result.freq, '2M')
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
self.assertTrue(result.equals(idx))
self.assertTrue(result.freq, '2M')
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
self.assertTrue(result.equals(exp))
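# Passing a freq that differs from the input's does not raise; the index is
# converted via asfreq using the end ('e') convention, as checked above.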
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=20, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
def test_constructor_nat(self):
self.assertRaises(
ValueError, period_range, start='NaT', end='2011-01-01', freq='M')
self.assertRaises(
ValueError, period_range, start='2011-01-01', end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
for func in [PeriodIndex, period_range]:
# PeriodIndex and period_range should produce identical indexes
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03', '2014-05', '2014-07'], freq='M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05', '2014-01-08', '2014-01-11',
'2014-01-14'], freq='D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
msg = ('Frequency must be positive, because it'
' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
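# A multiplied frequency such as '2M' denotes a span, so zero or negative
# multiples are rejected at construction time with the messages above.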
def test_constructor_freq_mult_dti_compat(self):
import itertools
mults = [1, 2, 3, 4, 5]
freqs = ['A', 'M', 'D', 'T', 'S']
for mult, freq in itertools.product(mults, freqs):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr, periods=10).to_period(freq)
tm.assert_index_equal(pidx, expected)
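# Sanity check: constructing a PeriodIndex directly with a multiplied
# frequency should agree with building the equivalent date_range and
# calling to_period on it.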
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
self.assertEqual(index.is_(index), True)
self.assertEqual(index.is_(create_index()), False)
self.assertEqual(index.is_(index.view()), True)
self.assertEqual(index.is_(index.view().view().view().view().view()), True)
self.assertEqual(index.view().is_(index), True)
ind2 = index.view()
index.name = "Apple"
self.assertEqual(ind2.is_(index), True)
self.assertEqual(index.is_(index[:]), False)
self.assertEqual(index.is_(index.asfreq('M')), False)
self.assertEqual(index.is_(index.asfreq('A')), False)
self.assertEqual(index.is_(index - 2), False)
self.assertEqual(index.is_(index - 0), False)
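# is_ checks object identity (views included), not value equality: a slice,
# an asfreq conversion, or an arithmetic result is a distinct object even
# when its values match the original index.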
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
def test_getitem_ndim2(self):
idx = period_range('2007-01', periods=3, freq='M')
result = idx[:, None]
# MPL kludge
tm.assertIsInstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assertTrue((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEqual(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEqual(len(result), 24)
result = ts[:'2009']
self.assertEqual(len(result), 36)
result = ts['2009':]
self.assertEqual(len(result), 50 - 24)
exp = result
result = ts[24:]
assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaisesRegexp(
KeyError, "left slice bound for non-unique label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
assert_series_equal(rs, ts)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
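# With a negative step the bounds are read in reverse order, so the slice
# '2014-10':'2015-02':-1 selects nothing (hence SLC[:0] above).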
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
self.assertTrue(Period('2007-01', freq='M') in rng)
self.assertFalse(Period('2007-01', freq='D') in rng)
self.assertFalse(Period('2007-01', freq='2M') in rng)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
self.assertTrue(result.equals(exp))
def test_periods_number_check(self):
self.assertRaises(
ValueError, period_range, '2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assertIsInstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assertTrue(index.equals(recon))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
self.assertTrue(stamps.equals(expected))
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEqual(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEqual(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
zs = [Timestamp('99-04-17 00:00:00', tz='UTC'),
Timestamp('2001-04-17 00:00:00', tz='UTC'),
Timestamp('2001-04-17 00:00:00', tz='America/Los_Angeles'),
Timestamp('2001-04-17 00:00:00', tz=None)]
for z in zs:
self.assertEqual(eval(repr(z)), z)
def test_to_timestamp_pi_nat(self):
# GH 7228
index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx')
result = index.to_timestamp('D')
expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1),
datetime(2011, 2, 1)], name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, 'idx')
result2 = result.to_period(freq='M')
self.assertTrue(result2.equals(index))
self.assertEqual(result2.name, 'idx')
result3 = result.to_period(freq='3M')
exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx')
self.assert_index_equal(result3, exp)
self.assertEqual(result3.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -2A')
with tm.assertRaisesRegexp(ValueError, msg):
result.to_period(freq='-2A')
def test_to_timestamp_pi_mult(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='2M', name='idx')
result = idx.to_timestamp()
expected = DatetimeIndex(['2011-01-01', 'NaT', '2011-02-01'], name='idx')
self.assert_index_equal(result, expected)
result = idx.to_timestamp(how='E')
expected = DatetimeIndex(['2011-02-28', 'NaT', '2011-03-31'], name='idx')
self.assert_index_equal(result, expected)
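# With a '2M' frequency each period spans two months, so how='E' stamps the
# end of the second month (e.g. 2011-01 -> 2011-02-28).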
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
assert_series_equal(ts, df.ix[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assertEqual(expected, result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5)
rng.name = 'index'
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
self.assertTrue(rs.equals(rng))
rs = df.reset_index().set_index('index')
tm.assertIsInstance(rs.index, PeriodIndex)
self.assertTrue(rs.index.equals(rng))
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.set_index(idx2)
self.assertTrue(df.index.equals(idx2))
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
self.assertTrue(result.index.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
self.assertTrue(result.columns.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
# invalid axis
assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
self.assertTrue(isinstance(result1.columns, DatetimeIndex))
self.assertTrue(isinstance(result2.columns, DatetimeIndex))
self.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
self.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
# PeriodIndex.to_timestamp always uses 'infer'
self.assertEqual(result1.columns.freqstr, 'AS-JAN')
self.assertEqual(result2.columns.freqstr, 'AS-JAN')
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
assert_series_equal(result, expected)
result[:] = 1
self.assertTrue((ts[1:3] == 1).all())
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 365 * 9 + 2)
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 261 * 9)
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(pi), 365 * 24)
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(pi), 24 * 60)
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(pi), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
self.assertTrue(pi1.shift(0).equals(pi1))
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
def test_asfreq(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEqual(pi1.asfreq('Q', 'S'), pi2)
self.assertEqual(pi1.asfreq('Q', 's'), pi2)
self.assertEqual(pi1.asfreq('M', 'start'), pi3)
self.assertEqual(pi1.asfreq('D', 'StarT'), pi4)
self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5)
self.assertEqual(pi1.asfreq('Min', 'S'), pi6)
self.assertEqual(pi1.asfreq('S', 'S'), pi7)
self.assertEqual(pi2.asfreq('A', 'S'), pi1)
self.assertEqual(pi2.asfreq('M', 'S'), pi3)
self.assertEqual(pi2.asfreq('D', 'S'), pi4)
self.assertEqual(pi2.asfreq('H', 'S'), pi5)
self.assertEqual(pi2.asfreq('Min', 'S'), pi6)
self.assertEqual(pi2.asfreq('S', 'S'), pi7)
self.assertEqual(pi3.asfreq('A', 'S'), pi1)
self.assertEqual(pi3.asfreq('Q', 'S'), pi2)
self.assertEqual(pi3.asfreq('D', 'S'), pi4)
self.assertEqual(pi3.asfreq('H', 'S'), pi5)
self.assertEqual(pi3.asfreq('Min', 'S'), pi6)
self.assertEqual(pi3.asfreq('S', 'S'), pi7)
self.assertEqual(pi4.asfreq('A', 'S'), pi1)
self.assertEqual(pi4.asfreq('Q', 'S'), pi2)
self.assertEqual(pi4.asfreq('M', 'S'), pi3)
self.assertEqual(pi4.asfreq('H', 'S'), pi5)
self.assertEqual(pi4.asfreq('Min', 'S'), pi6)
self.assertEqual(pi4.asfreq('S', 'S'), pi7)
self.assertEqual(pi5.asfreq('A', 'S'), pi1)
self.assertEqual(pi5.asfreq('Q', 'S'), pi2)
self.assertEqual(pi5.asfreq('M', 'S'), pi3)
self.assertEqual(pi5.asfreq('D', 'S'), pi4)
self.assertEqual(pi5.asfreq('Min', 'S'), pi6)
self.assertEqual(pi5.asfreq('S', 'S'), pi7)
self.assertEqual(pi6.asfreq('A', 'S'), pi1)
self.assertEqual(pi6.asfreq('Q', 'S'), pi2)
self.assertEqual(pi6.asfreq('M', 'S'), pi3)
self.assertEqual(pi6.asfreq('D', 'S'), pi4)
self.assertEqual(pi6.asfreq('H', 'S'), pi5)
self.assertEqual(pi6.asfreq('S', 'S'), pi7)
self.assertEqual(pi7.asfreq('A', 'S'), pi1)
self.assertEqual(pi7.asfreq('Q', 'S'), pi2)
self.assertEqual(pi7.asfreq('M', 'S'), pi3)
self.assertEqual(pi7.asfreq('D', 'S'), pi4)
self.assertEqual(pi7.asfreq('H', 'S'), pi5)
self.assertEqual(pi7.asfreq('Min', 'S'), pi6)
self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo')
result1 = pi1.asfreq('3M')
result2 = pi1.asfreq('M')
expected = PeriodIndex(freq='M', start='2001-12', end='2001-12')
self.assert_numpy_array_equal(result1.asi8, expected.asi8)
self.assertEqual(result1.freqstr, '3M')
self.assert_numpy_array_equal(result2.asi8, expected.asi8)
self.assertEqual(result2.freqstr, 'M')
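# The '3M' and 'M' conversions share the same underlying ordinals; only the
# freq, and hence the span each ordinal represents, differs.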
def test_asfreq_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M')
result = idx.asfreq(freq='Q')
expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q')
self.assertTrue(result.equals(expected))
def test_asfreq_mult_pi(self):
pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M')
for freq in ['D', '3D']:
result = pi.asfreq(freq)
exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT',
'2001-04-30'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
result = pi.asfreq(freq, how='S')
exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT',
'2001-03-01'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_asfreq_ts(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq('D', how='end')
df_result = df.asfreq('D', how='end')
exp_index = index.asfreq('D', how='end')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(exp_index))
self.assertTrue(df_result.index.equals(exp_index))
result = ts.asfreq('D', how='start')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(index.asfreq('D', how='start')))
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '0', 'A')
def test_negative_ordinals(self):
p = Period(ordinal=-1000, freq='A')
p = Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_numpy_array_equal(idx1, idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
pi1 = dti.to_period()
pi2 = dti.to_period(freq='D')
pi3 = dti.to_period(freq='3D')
self.assertEqual(pi1[0], Period('Jan 2005', freq='M'))
self.assertEqual(pi2[0], Period('1/31/2005', freq='D'))
self.assertEqual(pi3[0], Period('1/31/2005', freq='3D'))
self.assertEqual(pi1[-1], Period('Nov 2005', freq='M'))
self.assertEqual(pi2[-1], Period('11/30/2005', freq='D'))
self.assertEqual(pi3[-1], Period('11/30/2005', freq='3D'))
tm.assert_index_equal(pi1, period_range('1/1/2005', '11/1/2005', freq='M'))
tm.assert_index_equal(pi2, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('D'))
tm.assert_index_equal(pi3, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('3D'))
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_getitem_day(self):
# GH 6716
# Confirm DatetimeIndex and PeriodIndex work identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01'], s[0:31])
assert_series_equal(s['2013/02'], s[31:59])
assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(KeyError):
s[v]
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/02':], s[1:])
assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
assert_series_equal(s['2013/02':], s[31:])
assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(IndexError):
idx[v:]
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d], s)
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660])
assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'], s[3600:3960])
assert_series_equal(s['2013/01/01 10H':], s[3600:])
assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units'])
empty['units'] = empty['units'].astype('int64')
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D')
self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012]))
self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4]))
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
self.assertEqual(s['05Q4'], s[2])
def test_period_dt64_round_trip(self):
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period()
self.assertTrue(pi.to_timestamp().equals(dti))
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period(freq='H')
self.assertTrue(pi.to_timestamp().equals(dti))
def test_to_period_quarterly(self):
# make sure we can make the round trip
for month in MONTHS:
freq = 'Q-%s' % month
rng = period_range('1989Q3', '1991Q3', freq=freq)
stamps = rng.to_timestamp()
result = stamps.to_period(freq)
self.assertTrue(rng.equals(result))
def test_to_period_quarterlyish(self):
offsets = ['BQ', 'QS', 'BQS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'Q-DEC')
def test_to_period_annualish(self):
offsets = ['BA', 'AS', 'BAS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
offsets = ['MS', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
rng = date_range('01-Jan-2012', periods=8, freq='EOM')
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
def test_multiples(self):
result1 = Period('1989', freq='2A')
result2 = Period('1989', freq='A')
self.assertEqual(result1.ordinal, result2.ordinal)
self.assertEqual(result1.freqstr, '2A-DEC')
self.assertEqual(result2.freqstr, 'A-DEC')
self.assertEqual(result1.freq, offsets.YearEnd(2))
self.assertEqual(result2.freq, offsets.YearEnd())
self.assertEqual((result1 + 1).ordinal, result1.ordinal + 2)
self.assertEqual((result1 - 1).ordinal, result2.ordinal - 2)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
tm.assertIsInstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assertIsInstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(3, 2, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_align_series(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
assert_series_equal(result, expected)
# it works!
for kind in ['inner', 'outer', 'left', 'right']:
ts.align(ts[::2], join=kind)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with assertRaisesRegexp(ValueError, msg):
ts + ts.asfreq('D', how="end")
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
def test_union(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
self.assertTrue(result.equals(index))
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
self.assertTrue(result.equals(index))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.union, index2)
self.assertRaises(ValueError, index.join, index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
self.assertRaises(ValueError, index.join, index3)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
self.assertTrue(result.equals(index[10:-5]))
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
self.assertTrue(result.equals(index[10:-5]))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.intersection, index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
self.assertRaises(ValueError, index.intersection, index3)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'weekofyear', 'week', 'dayofweek',
'weekday', 'dayofyear', 'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert_equal(len(periodindex), len(field_idx))
for x, val in zip(periods, field_idx):
assert_equal(getattr(x, field), val)
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2006, 2007], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2005, 2005, 2007], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2005, 2006], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2006, 2005, 2005], freq='A')
self.assertRaises(ValueError, getattr, index, 'is_full')
self.assertTrue(index[:0].is_full)
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
self.assertTrue(result.equals(expected))
result = index.map(lambda x: x.ordinal)
exp = [x.ordinal for x in index]
tm.assert_numpy_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if compat.PY3:
# unicode
types += compat.text_type,
for t in types:
expected = np.array(lmap(t, raw), dtype=object)
res = index.map(t)
# should return an array
tm.assertIsInstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
# dtype should be object
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
tm.assert_numpy_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
tm.assertIsInstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
tm.assertIsInstance(s.index.levels[0], PeriodIndex)
tm.assertIsInstance(s.index.values[0][0], Period)
def test_to_datetime_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
result = index.to_datetime()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
self.assertRaises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
self.assertEqual(inst.args[0], bad_period)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
tm.assertIsInstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = self.round_trip_pickle(prng)
self.assertEqual(new_prng.freq, offsets.MonthEnd())
self.assertEqual(new_prng.freqstr, 'M')
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
self.assertEqual(idx.name, idx[1:].name)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
self.assertTrue(idx.equals(org))
def test_combine_first(self):
# GH 3367
didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M')
pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M')
# check to be consistent with DatetimeIndex
for idx in [didx, pidx]:
a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_searchsorted(self):
for freq in ['D', '2D']:
pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03',
'2014-01-04', '2014-01-05'], freq=freq)
p1 = pd.Period('2014-01-01', freq=freq)
self.assertEqual(pidx.searchsorted(p1), 0)
p2 = pd.Period('2014-01-04', freq=freq)
self.assertEqual(pidx.searchsorted(p2), 3)
msg = "Input has different freq=H from PeriodIndex"
with self.assertRaisesRegexp(ValueError, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='H'))
msg = "Input has different freq=5D from PeriodIndex"
with self.assertRaisesRegexp(ValueError, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='5D'))
def test_round_trip(self):
p = Period('2000Q1')
new_p = self.round_trip_pickle(p)
self.assertEqual(new_p, p)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestMethods(tm.TestCase):
"Base test class for MaskedArrays."
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert_equal(dt1 + 1, dt2)
#
# GH 4731
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
def test_add_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq=freq))
self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq=freq))
self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq=freq))
self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq=freq))
self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq=freq))
self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq=freq))
self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq=freq))
self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq=freq))
self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq=freq))
self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq=freq))
self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq=freq))
self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p + o
def test_add_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p + o
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p + o
def test_sub_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq=freq))
self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq=freq))
self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq=freq))
self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq=freq))
self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq=freq))
self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq=freq))
self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq=freq))
self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq=freq))
self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq=freq))
self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_nat_ops(self):
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertEqual((p - 1).ordinal, tslib.iNaT)
self.assertEqual((p - Period('2011-01', freq=freq)).ordinal, tslib.iNaT)
self.assertEqual((Period('2011-01', freq=freq) - p).ordinal, tslib.iNaT)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + 2
expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
result2 = result - 2
self.assertTrue(result2.equals(idx))
msg = "unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
idx + "str"
def test_pi_ops_array(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = np.add(idx, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = idx - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'], freq='M', name='idx')
self.assert_index_equal(result, exp)
result = np.subtract(idx, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'], freq='M', name='idx')
self.assert_index_equal(result, exp)
# incompatible freq
msg = "Input has different freq from PeriodIndex\(freq=M\)"
with tm.assertRaisesRegexp(ValueError, msg):
idx + np.array([np.timedelta64(1, 'D')] * 4)
idx = PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00', 'NaT',
'2011-01-01 12:00'], freq='H', name='idx')
result = idx + np.array([np.timedelta64(1, 'D')] * 4)
exp = PeriodIndex(['2011-01-02 09:00', '2011-01-02 10:00', 'NaT',
'2011-01-02 12:00'], freq='H', name='idx')
self.assert_index_equal(result, exp)
result = idx - np.array([np.timedelta64(1, 'h')] * 4)
exp = PeriodIndex(['2011-01-01 08:00', '2011-01-01 09:00', 'NaT',
'2011-01-01 11:00'], freq='H', name='idx')
self.assert_index_equal(result, exp)
msg = "Input has different freq from PeriodIndex\(freq=H\)"
with tm.assertRaisesRegexp(ValueError, msg):
idx + np.array([np.timedelta64(1, 's')] * 4)
idx = PeriodIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', 'NaT',
'2011-01-01 12:00:00'], freq='S', name='idx')
result = idx + np.array([np.timedelta64(1, 'h'), np.timedelta64(30, 's'),
np.timedelta64(2, 'h'), np.timedelta64(15, 'm')])
exp = PeriodIndex(['2011-01-01 10:00:00', '2011-01-01 10:00:30', 'NaT',
'2011-01-01 12:15:00'], freq='S', name='idx')
self.assert_index_equal(result, exp)
class TestPeriodRepresentation(tm.TestCase):
"""
Wish to match NumPy units
"""
def test_annual(self):
self._check_freq('A', 1970)
def test_monthly(self):
self._check_freq('M', '1970-01')
def test_weekly(self):
self._check_freq('W-THU', '1970-01-01')
def test_daily(self):
self._check_freq('D', '1970-01-01')
def test_business_daily(self):
self._check_freq('B', '1970-01-01')
def test_hourly(self):
self._check_freq('H', '1970-01-01')
def test_minutely(self):
self._check_freq('T', '1970-01-01')
def test_secondly(self):
self._check_freq('S', '1970-01-01')
def test_millisecondly(self):
self._check_freq('L', '1970-01-01')
def test_microsecondly(self):
self._check_freq('U', '1970-01-01')
def test_nanosecondly(self):
self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
exp = np.arange(10, dtype=np.int64)
self.assert_numpy_array_equal(rng.values, exp)
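# Usage sketch (illustrative, not part of the original test module): the
# check above relies on Period ordinals being anchored at the 1970 epoch,
# mirroring NumPy datetime64 units. Assuming that convention:
#
#     Period('1970-01-01', freq='D').ordinal   # -> 0
#     Period('1970-01-02', freq='D').ordinal   # -> 1
#     Period('1969-12-31', freq='D').ordinal   # -> -1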
def test_negone_ordinals(self):
freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
period = Period(ordinal=-1, freq='D')
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
self.assertEqual(period.year, 1969)
period = Period(ordinal=-1, freq='B')
repr(period)
period = Period(ordinal=-1, freq='W')
repr(period)
class TestComparisons(tm.TestCase):
def setUp(self):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
self.assertEqual(self.january1, self.january2)
def test_equal_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 == self.day
def test_notEqual(self):
self.assertNotEqual(self.january1, 1)
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
self.assertTrue(self.february > self.january1)
def test_greater_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 > self.day
def test_greater_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
self.assertTrue(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 >= self.day
with tm.assertRaises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
self.assertTrue(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 <= 1
def test_smaller(self):
self.assertTrue(self.january1 < self.february)
def test_smaller_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
self.assertEqual(sorted(periods), correctPeriods)
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
# confirm Period('NaT') works identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat),
(nat, t), (t, nat), (nat, nat)]:
self.assertEqual(left < right, False)
self.assertEqual(left > right, False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
self.assertEqual(left <= right, False)
self.assertEqual(left >= right, False)
def test_pi_pi_comp(self):
for freq in ['M', '2M', '3M']:
base = PeriodIndex(['2011-01', '2011-02',
'2011-03', '2011-04'], freq=freq)
p = Period('2011-02', freq=freq)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base == p, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base != p, exp)
exp = np.array([False, False, True, True])
self.assert_numpy_array_equal(base > p, exp)
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(base < p, exp)
exp = np.array([False, True, True, True])
self.assert_numpy_array_equal(base >= p, exp)
exp = np.array([True, True, False, False])
self.assert_numpy_array_equal(base <= p, exp)
idx = PeriodIndex(['2011-02', '2011-01', '2011-03', '2011-05'], freq=freq)
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(base == idx, exp)
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(base != idx, exp)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base > idx, exp)
exp = np.array([True, False, False, True])
self.assert_numpy_array_equal(base < idx, exp)
exp = np.array([False, True, True, False])
self.assert_numpy_array_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base <= idx, exp)
# different base freq
msg = "Input has different freq=A-DEC from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
base <= Period('2011', freq='A')
with tm.assertRaisesRegexp(ValueError, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A')
base <= idx
# different mult
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
base <= Period('2011', freq='4M')
with tm.assertRaisesRegexp(ValueError, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M')
base <= idx
def test_pi_nat_comp(self):
for freq in ['M', '2M', '3M']:
idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq)
result = idx1 > Period('2011-02', freq=freq)
exp = np.array([False, False, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == Period('NaT', freq=freq)
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != Period('NaT', freq=freq)
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(result, exp)
diff = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='4M')
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(ValueError, msg):
idx1 > diff
with tm.assertRaisesRegexp(ValueError, msg):
idx1 == diff
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
rustychris/stompy | stompy/model/suntans/sun_driver.py | 1 | 113146 | import os
import glob
import copy
import subprocess
import six
from collections import defaultdict
import re
import xarray as xr
import numpy as np
import datetime
from matplotlib.dates import date2num, num2date
from ... import utils, memoize
#from ..delft import dflow_model as dfm
from .. import hydro_model as hm
from ..delft import dfm_grid
from ...grid import unstructured_grid
from ...spatial import linestring_utils
from . import store_file
import logging as log
try:
import pytz
utc = pytz.timezone('utc')
except ImportError:
log.warning("Couldn't load utc timezone")
utc = None
datenum_precision_per_s = 100 # 10ms - should be evenly divisible into 1e6
def dt_round(dt):
""" Given a datetime or timedelta object, round it to datenum_precision
"""
if isinstance(dt,datetime.timedelta):
td = dt
# days are probably fine
dec_seconds = td.seconds + 1e-6 * td.microseconds
# the correct number of time quanta
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000 // datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
return datetime.timedelta( days=td.days,
seconds = new_seconds,
microseconds = new_microseconds )
else:
# same deal, but the fields have slightly different names
# And the integer arithmetic cannot be used to count absolute seconds -
# that will overflow 32-bit ints (okay with 64, but better not
# to assume 64-bit ints are available)
dec_seconds = dt.second + 1e-6 * dt.microsecond
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000// datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
# to handle the carries between microseconds, seconds, days,
# construct an exact timedelta object - also avoids having to do
# int arithmetic with seconds over many days, which could overflow.
td = datetime.timedelta(seconds = new_seconds - dt.second,
microseconds = new_microseconds - dt.microsecond)
return dt + td
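# Usage sketch (illustrative, not part of the original module): with
# datenum_precision_per_s = 100, dt_round snaps to 10 ms quanta, so
# 0.123456 s rounds to 0.12 s for both timedelta and datetime inputs:
#
#     dt_round(datetime.timedelta(seconds=0.123456))
#     # -> datetime.timedelta(microseconds=120000)
#     dt_round(datetime.datetime(2000, 1, 1, 0, 0, 0, 123456))
#     # -> datetime.datetime(2000, 1, 1, 0, 0, 0, 120000)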
# certainly there is a better way to do this...
MultiBC=hm.MultiBC
StageBC=hm.StageBC
FlowBC=hm.FlowBC
VelocityBC=hm.VelocityBC
ScalarBC=hm.ScalarBC
SourceSinkBC=hm.SourceSinkBC
OTPSStageBC=hm.OTPSStageBC
OTPSFlowBC=hm.OTPSFlowBC
OTPSVelocityBC=hm.OTPSVelocityBC
HycomMultiVelocityBC=hm.HycomMultiVelocityBC
HycomMultiScalarBC=hm.HycomMultiScalarBC
NOAAStageBC=hm.NOAAStageBC
NwisFlowBC=hm.NwisFlowBC
NwisStageBC=hm.NwisStageBC
CdecFlowBC=hm.CdecFlowBC
CdecStageBC=hm.CdecStageBC
class GenericConfig(object):
""" Handles reading and writing of suntans.dat formatted files.
Older code treated keys as case-insensitive, but they now appear to
be case-sensitive.
"""
keys_are_case_sensitive=True
def __init__(self,filename=None,text=None):
""" filename: path to file to open and parse
text: a string containing the entire file to parse
"""
self.filename = filename
if filename:
fp = open(filename,'rt')
else:
fp = [s+"\n" for s in text.split("\n")]
self.entries = {}
self.originals = []
for line in fp:
# save original text so we can write out a new suntans.dat with
# only minor changes
self.originals.append(line)
i = len(self.originals)-1
m = re.match(r"^\s*((\S+)\s+(\S+))?\s*.*",line)
if m and m.group(1):
key = m.group(2)
if not self.keys_are_case_sensitive:
key=key.lower()
val = m.group(3)
self.entries[key] = [val,i]
if filename:
fp.close()
def copy(self):
# punt copy semantics and handling to copy module
return copy.deepcopy(self)
def conf_float(self,key):
return self.conf_str(key,float)
def conf_int(self,key,default=None):
x=self.conf_str(key,int)
if x is None:
return default
return x
def conf_str(self,key,caster=lambda x:x):
if not self.keys_are_case_sensitive:
key = key.lower()
if key in self.entries:
return caster(self.entries[key][0])
else:
return None
def __setitem__(self,key,value):
self.set_value(key,value)
def __getitem__(self,key):
return self.conf_str(key)
def __delitem__(self,key):
# if the line already exists, it will be written out commented, otherwise
# it won't be written at all.
self.set_value(key,None)
def __contains__(self,key):
return self[key] is not None
def get(self,key,default=None):
if key in self:
return self[key]
else:
return default
def __eq__(self,other):
return self.is_equal(other)
def is_equal(self,other,limit_to_keys=None):
# key by key equality comparison:
log.debug("Comparing two configs")
for k in self.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in other.entries:
log.debug("Other is missing key %s"%k)
return False
elif self.val_to_str(other.entries[k][0]) != self.val_to_str(self.entries[k][0]):
log.debug("Different values key %s => %s, %s"%(k,self.entries[k][0],other.entries[k][0]))
return False
for k in other.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in self.entries:
log.debug("other has extra key %s"%k)
return False
return True
def disable_value(self,key):
if not self.keys_are_case_sensitive:
key = key.lower()
if key not in self.entries:
return
old_val,i = self.entries[key]
self.originals[i] = "# %s"%(self.originals[i])
self.entries[key][0] = None
def val_to_str(self,value):
# make sure that floats are formatted with plenty of digits:
# and handle annoyance of standard Python types vs. numpy types
# But None stays None, as it gets handled specially elsewhere
if value is None:
return None
if isinstance(value,float) or isinstance(value,np.floating):
value = "%.12g"%value
else:
value = str(value)
return value
def set_value(self,key,value):
""" Update a value in the configuration. Setting an item to None will
comment out the line if it already exists, and omit the line if it does
not yet exist.
"""
if not self.keys_are_case_sensitive:
key = key.lower()
else:
if (key not in self.entries):
for other in self.entries:
if key.lower()==other.lower():
raise Exception("Probably a case-sensitive error: %s vs %s"%(key,other))
if key not in self.entries:
if value is None:
return
self.originals.append("# blank #")
i = len(self.originals) - 1
self.entries[key] = [None,i]
old_val,i = self.entries[key]
value = self.val_to_str(value)
if value is not None:
self.originals[i] = "%s %s # from sunreader code\n"%(key,value)
else:
self.originals[i] = "# " + self.originals[i]
self.entries[key][0] = value
def write_config(self,filename=None,check_changed=True,backup=True):
"""
Write this config out to a text file
filename: defaults to self.filename
check_changed: if True, and the file already exists and is not materially different,
then do nothing. Good for avoiding unnecessary changes to mtimes.
backup: if true, copy any existing file to <filename>.bak
"""
filename = filename or self.filename
if filename is None:
raise Exception("No clue about the filename for writing config file")
if check_changed:
if os.path.exists(filename):
existing_conf = self.__class__(filename)
if existing_conf == self:
log.debug("No change in config")
return
if os.path.exists(filename) and backup:
filename_bak = filename + ".bak"
os.rename(filename,filename_bak)
fp = open(filename,'wt')
for line in self.originals:
fp.write(line)
fp.close()
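# Usage sketch (illustrative values, not from any real run): GenericConfig
# round-trips suntans.dat-style "key value" lines, preserving comments and
# ordering via self.originals:
#
#     cfg = GenericConfig(text="Nkmax 10\ndt 90\n")
#     cfg.conf_int('Nkmax')   # -> 10
#     cfg['dt'] = 45.0        # stored as "45" via val_to_str
#     cfg.conf_float('dt')    # -> 45.0
#     del cfg['Nkmax']        # comments the line out on write
#     cfg.write_config('suntans.dat')  # hypothetical output path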
class SunConfig(GenericConfig):
def time_zero(self):
""" return python datetime for the when t=0 is"""
# try the old way, where these are separate fields:
start_year = self.conf_int('start_year')
start_day = self.conf_float('start_day')
if start_year is not None:
# Note: we're dealing with 0-based start days here.
start_datetime = datetime.datetime(start_year,1,1,tzinfo=utc) + dt_round(datetime.timedelta(start_day))
return start_datetime
# That failed, so try the other way
log.debug("Trying the new way of specifying t0")
s = self.conf_str('TimeZero') # 1999-01-01-00:00
start_datetime = datetime.datetime.strptime(s,'%Y-%m-%d-%H:%M')
start_datetime = start_datetime.replace(tzinfo=utc)
return start_datetime
def simulation_seconds(self):
return self.conf_float('dt') * self.conf_int('nsteps')
def timestep(self):
""" Return a timedelta object for the timestep - should be safe from roundoff.
"""
return dt_round( datetime.timedelta(seconds=self.conf_float('dt')) )
def simulation_period(self):
""" This is more naive than the SunReader simulation_period(), in that
it does *not* look at any restart information, just start_year, start_day,
dt, and nsteps
WARNING: this used to add an extra dt to start_date - maybe trying to make it
the time of the first profile output?? this seems like a bad idea. As of
Nov 18, 2012, it does not do that (and at the same time, moves to datetime
arithmetic)
return a pair of python datetime objects for the start and end of the simulation.
"""
t0 = self.time_zero()
# why did it add dt here???
# start_date = t0 + datetime.timedelta( self.conf_float('dt') / (24.*3600) )
# simulation_days = self.simulation_seconds() / (24.*3600)
# end_date = start_date + datetime.timedelta(simulation_days)
start_date = t0
end_date = start_date + self.conf_int('nsteps')*self.timestep()
return start_date,end_date
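# Worked example (hypothetical config values): with start_year=1999,
# start_day=0.0, dt=90 and nsteps=960, the run spans 960*90 s = 1 day:
#
#     t0 = conf.time_zero()              # 1999-01-01 00:00 UTC
#     end = t0 + 960 * conf.timestep()   # 1999-01-02 00:00 UTC
#     conf.simulation_period()           # -> (t0, end)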
def copy_t0(self,other):
self.set_value('start_year',other.conf_int('start_year'))
self.set_value('start_day',other.conf_float('start_day'))
# def set_simulation_period(self,start_date,end_date):
# """ Based on the two python datetime instances given, sets
# start_day, start_year and nsteps
# """
# self.set_value('start_year',start_date.year)
# t0 = datetime.datetime( start_date.year,1,1,tzinfo=utc )
# self.set_value('start_day',date2num(start_date) - date2num(t0))
#
# # roundoff dangers here -
# # self.set_simulation_duration_days( date2num(end_date) - date2num(start_date))
# self.set_simulation_duration(delta=(end_date - start_date))
#
# def set_simulation_duration_days(self,days):
# self.set_simulation_duration(days=days)
# def set_simulation_duration(self,
# days=None,
# delta=None,
# seconds = None):
# """ Set the number of steps for the simulation - exactly one of the parameters should
# be specified:
# days: decimal number of days - DANGER - it's very easy to get some round-off issues here
# delta: a datetime.timedelta object.
# hopefully safe, as long as any differencing between dates was done with UTC dates
# (or local dates with no daylight savings transitions)
# seconds: total number of seconds - this should be safe, though there are some possibilities for
# roundoff.
#
# """
# print("Setting simulation duration:")
# print(" days=",days)
# print(" delta=",delta)
# print(" seconds=",seconds)
#
# # convert everything to a timedelta -
# if (days is not None) + (delta is not None) + (seconds is not None) != 1:
# raise Exception("Exactly one of days, delta, or seconds must be specified")
# if days is not None:
# delta = datetime.timedelta(days=days)
# elif seconds is not None:
# delta = datetime.timedelta(seconds=seconds)
#
# # assuming that dt is also a multiple of the precision (currently 10ms), this is
# # safe
# delta = dt_round(delta)
# print(" rounded delta = ",delta)
# timestep = dt_round(datetime.timedelta(seconds=self.conf_float('dt')))
# print(" rounded timestep =",timestep)
#
# # now we have a hopefully exact simulation duration in integer days, seconds, microseconds
# # and a similarly exact timestep
# # would like to do this:
# # nsteps = delta / timestep
# # but that's not supported until python 3.3 or so
# def to_quanta(td):
# """ return integer number of time quanta in the time delta object
# """
# us_per_quanta = 1000000 // datenum_precision_per_s
# return (td.days*86400 + td.seconds)*datenum_precision_per_s + \
# int( round( td.microseconds/us_per_quanta) )
# quanta_timestep = to_quanta(timestep)
# quanta_delta = to_quanta(delta)
#
# print(" quanta_timestep=",quanta_timestep)
# print(" quanta_delta=",quanta_delta)
# nsteps = quanta_delta // quanta_timestep
#
# print(" nsteps = ",nsteps)
# # double-check, going back to timedelta objects:
# err = nsteps * timestep - delta
# self.set_value('nsteps',int(nsteps))
# print("Simulation duration requires %i steps (rounding error=%s)"%(self.conf_int('nsteps'),err))
def is_grid_compatible(self,other):
""" Compare two config's, and return False if any parameters which would
affect grid partitioning/celldata/edgedata/etc. are different.
Note that differences in other input files can also cause two grids to be different,
esp. vertspace.dat
"""
# keep all lowercase
keys = ['Nkmax',
'stairstep',
'rstretch',
'CorrectVoronoi',
'VoronoiRatio',
'vertgridcorrect',
'IntDepth',
'pslg',
'points',
'edges',
'cells',
'depth',
# 'vertspace.dat.in' if rstretch==0
'topology.dat',
'edgedata',
'celldata',
'vertspace.dat']
return self.is_equal(other,limit_to_keys=keys)
class SuntansModel(hm.HydroModel):
# Annoying, but suntans doesn't like signed elevations
# this offset will be applied to grid depths and freesurface boundary conditions.
# This is error prone, though, and makes it difficult to "round-trip"
# grid information. In particular, if a new run is created by loading an old
# run, there will be an issue where the grid may get z_offset applied twice.
# This should be reimplemented as a z_datum. So no behind-the-scenes offsets,
# just have a standardized place for saying that my model's z=0 is z_offset
# from z_datum, e.g. z_datum='NAVD88' and z_offset.
# maybe the appropriate thing is a dictionary, mapping datum names to offsets.
# like z_datum['NAVD88']=-5.
z_offset=0.0
ic_ds=None
met_ds=None
# None: not a restart, or
# path to suntans.dat for the run being restarted, or True if this is
# a restart but we don't have a separate directory for the restart,
# just StartFiles
restart=None
restart_model=None # model instance being restarted
restart_symlink=True # default to symlinking restarts
# for partition, run, etc.
sun_bin_dir=None
mpi_bin_dir=None
# 'auto': the grid and projection information will be used to
# update the coriolis parameter.
# None: leave whatever value is in the template
# <float>: use that as the coriolis parameter
coriolis_f='auto'
# experimental -- not yet working.
# the suntans code does not yet remap edge data from the original
# order to the -g ordering (which is different, even when running
# single-core).
use_edge_depths=False # write depth data per-edge in a separate file.
def __init__(self):
super(SuntansModel,self).__init__()
self.load_template(os.path.join(os.path.dirname(__file__),"data","suntans.dat"))
@property
def time0(self):
dt=datetime.datetime.strptime(self.config['starttime'],
"%Y%m%d.%H%M%S")
return utils.to_dt64(dt)
def create_restart(self,symlink=True):
new_model=self.__class__() # in case of subclassing
# SuntansModel()
new_model.config=self.config.copy()
# things that have to match up, but are not part of the config:
new_model.num_procs=self.num_procs
new_model.restart=self.config_filename
new_model.restart_model=self
new_model.restart_symlink=symlink
# There is some extra machinery in load_grid(...) to get the right cell and
# edge depths -- this call would lose those
# new_model.set_grid(unstructured_grid.UnstructuredGrid.read_suntans(self.run_dir))
# So copy the grid we already have.
# UnstructuredGrid.copy() is naive and doesn't get all the depth fields, so
# here just pass self.grid, even though it may get mutated.
new_model.set_grid(self.grid)
new_model.run_start=self.restartable_time()
return new_model
@classmethod
def run_completed(cls,fn):
"""
fn: path to either folder containing suntans.dat, or path
to suntans.dat itself.
returns: True if the file exists and the folder contains a run which
ran to completion. Otherwise False.
"""
if not os.path.exists(fn):
return False
if os.path.isdir(fn):
fn=os.path.join(fn,"suntans.dat")
if not os.path.exists(fn):
return False
model=cls.load(fn)
if model is None:
return False
return model.is_completed()
def is_completed(self):
step_fn=os.path.join(self.run_dir,self.config['ProgressFile'])
if not os.path.exists(step_fn):
return False
with open(step_fn,'rt') as fp:
progress=fp.read()
return "100% Complete" in progress
def set_grid(self,grid):
"""
read/load grid, check for depth data and edge marks.
This does not apply the z_offset -- that is only
applied during writing out the rundata.
"""
if isinstance(grid,six.string_types):
# step in and load as suntans, rather than generic
grid=unstructured_grid.SuntansGrid(grid)
# depending on the source of the grid, it may need edges flipped
# to be consistent with suntans expectations that nc1 is always into
# the domain, and nc2 may be external
grid.orient_edges()
super(SuntansModel,self).set_grid(grid)
# 2019-05-29: trying to transition to using z for elevation, since
# 'depth' has a positive-down connotation
# make sure we have the fields expected by suntans
if 'z_bed' not in grid.cells.dtype.names:
if 'depth' in grid.cells.dtype.names:
self.log.warning("For now, assuming that cells['depth'] is positive up")
cell_z_bed=grid.cells['depth']
elif 'z_bed' in grid.nodes.dtype.names:
cell_z_bed=grid.interp_node_to_cell(grid.nodes['z_bed'])
# and avoid overlapping names
grid.delete_node_field('z_bed')
elif 'depth' in grid.nodes.dtype.names:
cell_z_bed=grid.interp_node_to_cell(grid.nodes['depth'])
self.log.warning("For now, assuming that nodes['depth'] is positive up")
else:
self.log.warning("No depth information in grid nodes or cells. Creating zero-depth")
cell_z_bed=np.zeros(grid.Ncells(),np.float64)
grid.add_cell_field('z_bed',cell_z_bed)
# with the current suntans version, depths are on cells, but model driver
# code in places wants an edge depth. so copy those here.
e2c=grid.edge_to_cells() # this is assumed in other parts of the code that do not recalculate it.
nc1=e2c[:,0].copy() ; nc2=e2c[:,1].copy()
nc1[nc1<0]=nc2[nc1<0] ; nc2[nc2<0]=nc1[nc2<0]
# edge depth is shallower of neighboring cells
# these depths are still positive up, though.
edge_z_bed=np.maximum(grid.cells['z_bed'][nc1],grid.cells['z_bed'][nc2])
if 'edge_z_bed' in grid.edges.dtype.names:
deep_edges=(grid.edges['edge_z_bed']<edge_z_bed)
if np.any(deep_edges):
self.log.info("%d edges had a specified depth deeper than neighboring cells. Replaced them"%
deep_edges.sum())
grid.edges['edge_z_bed'][deep_edges]=edge_z_bed[deep_edges]
else:
grid.add_edge_field('edge_z_bed',edge_z_bed)
if 'mark' not in grid.edges.dtype.names:
mark=np.zeros( grid.Nedges(), np.int32)
grid.add_edge_field('mark',mark)
self.grid=grid
self.set_default_edge_marks()
def set_default_edge_marks(self):
# update marks to a reasonable starting point
e2c=self.grid.edge_to_cells()
bc_edge=e2c.min(axis=1)<0
mark=self.grid.edges['mark']
mark[mark<0] = 0
mark[ (mark==0) & bc_edge ] = 1
# allow other marks to stay
self.grid.edges['mark'][:]=mark
def edge_depth(self,j,datum=None):
"""
Return the bed elevation for edge j, in meters, positive=up.
Suntans implementation relies on set_grid() having set edge depths
to be the min. of neighboring cells
"""
z=self.grid.edges['edge_z_bed'][j]
if datum is not None:
if datum=='eta0':
z+=self.initial_water_level()
return z
@classmethod
def load(cls,fn,load_grid=True,load_met=False,load_ic=False,load_bc=False):
"""
Open an existing model setup, from path to its suntans.dat
return None if run could not be loaded.
load_met: if true, load an existing Met netcdf file to self.met_ds
load_ic: likewise for initial conditions
load_bc: likewise for boundary conditions
"""
model=cls()
if os.path.isdir(fn):
fn=os.path.join(fn,'suntans.dat')
if not os.path.exists(fn):
return None
model.load_template(fn)
model.set_run_dir(os.path.dirname(fn),mode='existing')
# infer number of processors based on celldata files
# for restarts, this is overridden in infer_restart() by looking
# at the number of restart files, since in some scripts those
# are created earlier, while the celldata files aren't created until
# partition is called.
sub_cells=glob.glob( os.path.join(model.run_dir,'celldata.dat.*') )
if len(sub_cells)>0:
model.num_procs=len(sub_cells)
else:
# probably better to test whether it has even been processed
model.num_procs=1
model.infer_restart()
model.set_times_from_config()
# This will need some tweaking to fail gracefully
if load_grid:
try:
model.load_grid()
except OSError:
# this may be too strict -- a multiproc run could be fine but not
# necessarily have the global grid.
return None
if load_met:
model.load_met_ds()
if load_ic:
model.load_ic_ds()
if load_bc:
model.load_bc_ds()
return model
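# Usage sketch (hypothetical paths): load() reconstructs a model from an
# existing run; extra datasets are opted into via keyword flags:
#
#     model = SuntansModel.load("runs/myrun")    # grid loaded by default
#     model = SuntansModel.load("runs/myrun/suntans.dat",
#                               load_bc=True, load_ic=True)
#     # returns None if suntans.dat is missing or the grid cannot be read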
def load_grid(self):
"""
Set self.grid from existing suntans-format grid in self.run_dir.
"""
g=unstructured_grid.UnstructuredGrid.read_suntans(self.run_dir)
# hacked in support to read cell depths
cell_depth_fn=self.file_path('depth')+"-voro"
if ( ('z_bed' not in g.cells.dtype.names)
and
(os.path.exists(cell_depth_fn)) ):
self.log.debug("Will read cell depths, too")
cell_xyz=np.loadtxt(cell_depth_fn)
assert cell_xyz.shape[0]==g.Ncells(),"%s didn't have the right number of cells (%d vs %d)"%(cell_depth_fn,
cell_xyz.shape[0],
g.Ncells())
# cell centers can be a bit lenient in case there are centroid vs. circumcenter vs nudged
# differences.
if not np.allclose(cell_xyz[:,:2], g.cells_center()):
self.log.warning("%s cell locations don't match grid"%cell_depth_fn)
self.log.warning("Will forge ahead nevertheless")
# on disk these are positive down, but model driver convention is positive up
# (despite being called depth...)
g.add_cell_field('z_bed',-cell_xyz[:,2])
g.add_cell_field('depth',-cell_xyz[:,2]) # will be phased out
# hacked in support to read depth on edges
edge_depth_fn=self.file_path('depth')+"-edge"
if ( ('edge_z_bed' not in g.edges.dtype.names)
and
(os.path.exists(edge_depth_fn)) ):
self.log.debug("Will read edge depths, too")
edge_xyz=np.loadtxt(edge_depth_fn)
assert edge_xyz.shape[0]==g.Nedges(),"%s didn't have the right number of edges (%d vs %d)"%(edge_depth_fn,
edge_xyz.shape[0],
g.Nedges())
assert np.allclose(edge_xyz[:,:2], g.edges_center()),"%s edge locations don't match"%edge_depth_fn
# on disk these are positive down, but model driver convention is positive up
# (despite being called depth...) -- in the process of using edge_z_bed in the
# driver script to make the sign convention more apparent.
# g.add_edge_field('edge_depth',-edge_xyz[:,2]) # being phased out
g.add_edge_field('edge_z_bed',-edge_xyz[:,2])
self.set_grid(g)
return g
def infer_restart(self):
"""
See if this run is a restart.
Sets self.restart to:
None: not a restart
True: is a restart, but insufficient information to find the parent run
string: path to suntans.dat for the parent run
"""
if self.config['StartFile'] is None:
# Possibly not a valid config file
self.restart=None
return
start_path=os.path.join(self.run_dir,self.config['StartFile']+".0")
if os.path.exists(start_path):
log.debug("Looks like a restart")
self.restart=True
# Get num_procs from the number of restart files.
for proc in range(1024):
fn=os.path.join(self.run_dir,self.config['StartFile']+".%d"%proc)
if not os.path.exists(fn):
break
self.num_procs=proc
log.debug("Restart appears to have %d subdomains"%self.num_procs)
if os.path.islink(start_path):
start_path=os.path.realpath(start_path)
parent_dir=os.path.dirname(start_path)
assert not os.path.samefile(parent_dir,self.run_dir)
parent_sun=os.path.join(parent_dir,"suntans.dat")
if os.path.exists(parent_sun):
log.debug("And the previous suntans.dat: %s"%parent_sun)
self.restart=parent_sun
else:
log.info("Checked for %s but no luck"%parent_sun)
else:
log.info("Restart file %s is not a link"%start_path)
else:
log.debug("Does not look like a restart based on %s"%start_path)
self.restart=None
def chain_restarts(self,count=None,load_grid=False):
"""
return a list of up to count (None: unlimited) Model instances
in forward chronological order of consecutive restarts.
load_grid: defaults to *not* loading the grid of the earlier runs.
The last item is always self.
count: either the count of how many runs to return, or a np.datetime64
such that we'll go back to a run covering that date if possible.
if this is a tuple of datetimes, only return the runs covering that time
range.
"""
runs=[self]
run=self
while 1:
if isinstance(count,np.datetime64):
if runs[0].run_start <=count:
break
elif isinstance(count,tuple):
if runs[0].run_start < count[0]:
break
elif count and len(runs)>=count:
break
run.infer_restart()
if run.restart and run.restart is not True:
run=SuntansModel.load(run.restart,load_grid=load_grid)
runs.insert(0,run)
else:
break
if isinstance(count,tuple):
# Trim runs coming after the requested period
runs=[run for run in runs if run.run_start<count[1]]
if len(runs)==0:
log.warning("chain_restarts wound up with zero runs for count=%s"%str(count))
return runs
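# Usage sketch (hypothetical values): chain_restarts() walks restart links
# backwards and returns runs oldest-first, with self always last:
#
#     runs = model.chain_restarts(count=3)   # at most 3 consecutive runs
#     runs = model.chain_restarts(count=np.datetime64("2016-06-01"))
#     # walk back until the earliest run covers that date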
def chain_start(self,count=None):
"""
Analog of run_start, but across chained restarts.
count is passed to chain_restarts().
"""
runs=self.chain_restarts(count=count)
return runs[0].run_start
def chain_stop(self,count=None):
"""
Analog of run_stop, but across chained restarts.
Included for completeness, but this is always the same
as self.run_stop (since we don't chain forward in time).
"""
return self.run_stop
def load_template(self,fn):
self.template_fn=fn
self.config=SunConfig(fn)
def set_run_dir(self,path,mode='create'):
assert mode!='clean',"Suntans driver doesn't know what clean is"
return super(SuntansModel,self).set_run_dir(path,mode)
def file_path(self,key,proc=None):
fn=os.path.join(self.run_dir,self.config[key])
if proc is not None:
fn+=".%d"%proc
return fn
@property
def config_filename(self):
return os.path.join(self.run_dir,"suntans.dat")
def write_config(self):
log.info("Writing config to %s"%self.config_filename)
self.config.write_config(self.config_filename)
def write_monitor(self):
if not self.mon_points: return
xys=[ np.array(feat['geom']) for feat in self.mon_points]
valid_xys=[xy
for xy in xys
if self.grid.select_cells_nearest(xy,inside=True) is not None]
np.savetxt( os.path.join(self.run_dir,self.config['DataLocations']),
np.array(valid_xys) )
def write(self):
self.update_config()
self.write_config()
self.write_monitor()
self.write_extra_files()
self.write_forcing()
# Must come after write_forcing() to allow BCs to modify grid
self.write_grid()
# Must come after write_forcing(), to get proper grid and to
# have access to freesurface BCs
if self.restart:
self.log.info("Even though this is a restart, write IC")
# There are times that it is useful to be able to read the IC
# back in, e.g. to set a boundary condition equal to its initial
# condition. For a restart, this would ideally be the same state
# as in the StartFiles. That's going to take some work for
# relatively little gain. So just create the IC as if this was
# not a restart.
self.write_ic()
if self.restart:
self.write_startfiles()
def initialize_initial_condition(self):
"""
Populate self.ic_ds with a baseline initial condition.
This should be called after all boundary conditions are in place.
"""
self.ic_ds=self.zero_initial_condition()
self.set_initial_h_from_bc()
def write_ic(self):
"""
Will have to think about how best to order this -- really need
to set this as a zero earlier on, and then have some known time
for the script to modify it, before finally writing it out here.
"""
# Creating an initial condition netcdf file:
if self.ic_ds is None:
self.initialize_initial_condition()
self.write_ic_ds()
def write_startfiles(self):
src_base=os.path.join(os.path.dirname(self.restart),
self.restart_model.config['StoreFile'])
dst_base=os.path.join(self.run_dir,self.config['StartFile'])
for proc in range(self.num_procs):
src=src_base+".%d"%proc
dst=dst_base+".%d"%proc
self.restart_copier(src,dst)
def copy_ic_to_bc(self,ic_var,bc_var):
"""
Copy IC values to the boundary conditions
Copies data for the given IC variable (e.g. 'salt'), to
open and flow boundaries for bc_var (e.g. 'S').
for flow boundaries, 'boundary_' is prepended to bc_var.
The initial condition is copied into bc_ds for all time steps,
and all layers.
"""
# Open boundaries
for ci,c in enumerate(utils.progress(self.bc_ds.cellp.values,msg="IC=>Open BCs")):
ic_values = self.ic_ds[ic_var].values[0,:,c]
self.bc_ds[bc_var].isel(Ntype3=ci).values[:,:]=ic_values[None,:]
# Flow boundaries
for ei,e in enumerate(utils.progress(self.bc_ds.edgep.values,msg="IC=>Flow BCs")):
c=self.grid.edges['cells'][e,0]
assert c>=0,"Is this edge flipped"
ic_values=self.ic_ds[ic_var].values[0,:,c]
self.bc_ds["boundary_"+bc_var].isel(Ntype2=ei).values[:,:]=ic_values[None,:]
def write_ic_ds(self):
self.ic_ds.to_netcdf( os.path.join(self.run_dir,self.config['initialNCfile']) )
def load_ic_ds(self):
fn=os.path.join(self.run_dir,self.config['initialNCfile'])
if not os.path.exists(fn): return False
self.ic_ds=xr.open_dataset(fn)
def set_initial_h_from_bc(self):
"""
prereq: self.bc_ds has been set.
"""
if len(self.bc_ds.Ntype3)==0:
log.warning("Cannot set initial h from BC because there are no type 3 edges")
return
time_i=np.searchsorted(self.bc_ds.time.values,self.run_start)
# both bc_ds and ic_ds should already incorporate the depth offset, so
# no further adjustment here.
h=self.bc_ds.h.isel(Nt=time_i).mean().values
# this is positive down, already shifted, clipped.
#cell_depths=self.ic_ds['dv'].values
# This led to drying issues in 3D, and ultimately was not the fix
# for issues in 2D
#self.ic_ds.eta.values[:]=np.maximum(h,-cell_depths)
self.ic_ds.eta.values[...]=h
log.info("Setting initial eta from BCs, value=max(z_bed,%.4f) (including z_offset of %.2f)"%(h,self.z_offset))
def write_forcing(self,overwrite=True):
# these map to lists of BCs, in case there are BC with mode='add'
# map edge to BC data
self.bc_type2=defaultdict(lambda: defaultdict(list)) # [<edge index>][<variable>]=>[DataArray,...]
# map cell to BC data
self.bc_type3=defaultdict(lambda: defaultdict(list)) # [<cell index>][<variable>]=>[DataArray,...]
# Flow BCs are handled specially since they apply across a group of edges
# Each participating edge should have an entry in bc_type2,
# [<edge index>]["Q"]=>"segment_name"
# and a corresponding entry in here:
self.bc_type2_segments=defaultdict(lambda: defaultdict(list)) # [<segment name>][<variable>]=>[DataArray,...]
# point sources.
# indexed by a tuple of (cell,k)
# [(cell,k)][<variable>] => [DataArray]
self.bc_point_sources=defaultdict(lambda: defaultdict(list))
super(SuntansModel,self).write_forcing()
# Get a time series that's the superset of all given timeseries
all_times=[]
# edge, cells, groups of edges
for bc_typ in [self.bc_type2,self.bc_type3,self.bc_type2_segments]:
for bc in bc_typ.values(): # each edge idx/cell idx/segment name
for vlist in bc.values(): # each variable on that edge/cell/segment
for v in vlist: #list of BCs for this variable on this element
if isinstance(v,six.string_types):
# type2 edges which reference a segment have no
# time series of their own.
continue
if 'time' in v.dims:
all_times.append( v['time'].values )
if all_times:
common_time=np.unique(np.concatenate(all_times))
else:
# no boundary conditions have times, so fabricate.
common_time=np.array( [self.run_start,self.run_stop] )
# Make sure that brackets the run:
pad=np.timedelta64(1,'D')
if common_time[0]>=self.run_start:
common_time=np.concatenate(( [self.run_start-pad],
common_time ))
# make sure there are *two* times beyond the end for quadratic
# interpolation
while len(common_time)<3 or common_time[-2]<=self.run_stop:
if common_time[-1]<self.run_stop+pad:
new_time=self.run_stop+pad
else:
new_time=common_time[-1]+pad
common_time=np.concatenate((common_time,[new_time]))
# SUNTANS applies quadratic interpolation in time, so it requires at least
# 3 time values - seems that it wants one time before and two times after
# the current time.
assert len(common_time)>2
self.bc_time=common_time
self.bc_ds=self.compile_bcs()
self.write_bc_ds()
if self.met_ds is None:
self.met_ds=self.zero_met()
self.write_met_ds()
def ds_time_units(self):
"""
setting for how to write time to netcdf
specifically as suntans expects. pass as
...
encoding=dict(time={'units':self.ds_time_units()}),
...
in xarray dataset to_netcdf(..)
"""
basetime=self.config['basetime']
assert len(basetime)==15 # YYYYMMDD.hhmmss
time_units="seconds since %s-%s-%s %s:%s:%s"%(basetime[0:4],
basetime[4:6],
basetime[6:8],
basetime[9:11],
basetime[11:13],
basetime[13:15])
return time_units
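# Illustrative sketch (assumes basetime=20170601.000000 in suntans.dat):
#   model.ds_time_units() => "seconds since 2017-06-01 00:00:00"
#   some_ds.to_netcdf(fn,encoding=dict(time={'units':model.ds_time_units()}))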
def write_bc_ds(self):
self.bc_ds.to_netcdf( os.path.join(self.run_dir,
self.config['netcdfBdyFile']),
encoding=dict(time={'units':self.ds_time_units()}))
def load_bc_ds(self):
fn=os.path.join(self.run_dir,
self.config['netcdfBdyFile'])
if not os.path.exists(fn): return False
self.bc_ds=xr.open_dataset(fn)
return self.bc_ds
def write_met_ds(self):
fn=os.path.join(self.run_dir,
self.config['metfile'])
if os.path.exists(fn):
log.info("Will replace %s"%fn)
os.unlink(fn)
else:
log.debug("Writing met ds to %s"%fn)
log.debug(str(self.met_ds))
self.met_ds.to_netcdf( fn,
encoding=dict(nt={'units':self.ds_time_units()},
Time={'units':self.ds_time_units()}) )
def load_met_ds(self):
fn=os.path.join(self.run_dir,
self.config['metfile'])
if not os.path.exists(fn): return False
self.met_ds=xr.open_dataset(fn)
def layer_data(self,with_offset=False,edge_index=None,cell_index=None,z_bed=None):
"""
Returns layer data without z_offset applied, and
positive up.
with no additional arguments, returns global information. edge_index or
cell_index will use a z_bed based on that element. z_bed is used to clip
z layers. z_bed should be a positive-up quantity. A specified z_bed
takes precedence over edge_index or cell_index.
Returns a xr.Dataset
with z_min, z_max, Nk, z_interface, z_mid.
z_interface and z_mid are ordered surface to bed.
if with_offset is True, the z_offset is included, which yields
more accurate (i.e. similar to suntans) layers when there is stretching
"""
if z_bed is None:
if edge_index is not None:
z_bed=self.grid.edge_depths()[edge_index]
elif cell_index is not None:
z_bed=self.grid.cell_depths()[cell_index]
Nk=int(self.config['Nkmax'])
z_min=self.grid.cells['z_bed'].min() # bed
z_max=self.grid.cells['z_bed'].max() # surface
r=float(self.config['rstretch'])
if with_offset:
z_min-=self.z_offset
z_max=0
depth=-z_min # positive:down
dzs=np.zeros(Nk, np.float64)
if r>1.0:
dzs[0]=depth*(r-1)/(r**Nk-1)
for k in range(1,Nk):
dzs[k]=r*dzs[k-1]
else:
dzs[:]=depth/float(Nk)
z_interface=np.concatenate( ( [z_max],
z_max-np.cumsum(dzs) ) )
z_mid=0.5*(z_interface[:-1]+z_interface[1:])
ds=xr.Dataset()
ds['z_min']=(),z_min
ds['z_max']=(),z_max
ds['z_interface']=('Nkp1',),z_interface
ds['z_mid']=('Nk',),z_mid
for v in ['z_min','z_max','z_interface','z_mid']:
ds[v].attrs['positive']='up'
return ds
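# Hedged example (hypothetical config with Nkmax=10, rstretch=1.1):
#   layers=model.layer_data()
#   layers.z_interface.values  # 11 interfaces, surface down to bed, positive up
#   layers.z_mid.values        # 10 layer centers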
def compile_bcs(self):
"""
Postprocess the information from write_forcing()
to create the BC netcdf dataset.
Note that bc_ds includes the z_offset.
"""
ds=xr.Dataset()
layers=self.layer_data()
Nk=layers.dims['Nk']
ds['z']=('Nk',),-(layers.z_mid.values + self.z_offset)
# suntans assumes that this dimension is Nt, not time
Nt=len(self.bc_time)
ds['time']=('Nt',),self.bc_time
# Scalars will introduce type3 and type2 because they may not know
# what type of flow forcing is there. Here we skim out scalars that
# do not have an associated h (type3) or flow (type2) boundary
# the list(...keys()) part is to make a copy, so the del's
# don't upset the iteration
for cell in list(self.bc_type3.keys()):
if 'h' not in self.bc_type3[cell]:
del self.bc_type3[cell]
# 'u' 'v' and 'Q' for type2
for edge in list(self.bc_type2.keys()):
if not ( ('u' in self.bc_type2[edge]) or
('v' in self.bc_type2[edge]) or
('Q' in self.bc_type2[edge])):
del self.bc_type2[edge]
Ntype3=len(self.bc_type3)
ds['cellp']=('Ntype3',),np.zeros(Ntype3,np.int32)-1
ds['xv']=('Ntype3',),np.zeros(Ntype3,np.float64)
ds['yv']=('Ntype3',),np.zeros(Ntype3,np.float64)
# the actual data variables for type 3:
ds['uc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['vc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['wc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['T']=('Nt','Nk','Ntype3',),20*np.ones((Nt,Nk,Ntype3),np.float64)
ds['S']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['h']=('Nt','Ntype3'),np.zeros( (Nt, Ntype3), np.float64 )
def interp_time(da):
if 'time' not in da.dims: # constant value
# this should do the right thing for both scalar and vector
# values
return da.values * np.ones( (Nt,)+da.values.shape )
if da.ndim==2:
assert da.dims[0]=='time'
# recursively call per-layer, which is assumed to be the second
# dimension
profiles=[ interp_time(da[:,i]) for i in range(da.shape[1]) ]
return np.vstack(profiles).T
return np.interp( utils.to_dnum(ds.time.values),
utils.to_dnum(da.time.values), da.values )
import time
elapsed=[0.0]
def combine_items(values,bc_items,offset=0.0):
base_item=None
# include the last mode='overwrite' bc, and sum the mode='add'
# bcs.
values[:]=offset
# aside from Q and h, other variables are 3D, which means
# that if the data comes back 2D, pad out the layer dimension
def pad_dims(data):
if values.ndim==2 and data.ndim==1:
return data[:,None] # broadcastable vertical dimension
else:
return data
for bc_item in bc_items:
if bc_item.mode=='add':
t0=time.time()
values[:] += pad_dims(interp_time(bc_item))
elapsed[0]+=time.time()-t0
else:
base_item=bc_item
if base_item is None:
# combine_items is shared by the cell, segment, edge and point-source
# loops, so don't reference a specific loop variable here.
self.log.warning("BC has no mode='overwrite' items; applying only offset and additive BCs")
else:
t0=time.time()
values[:] += pad_dims(interp_time(base_item))
elapsed[0]+=time.time()-t0
cc=self.grid.cells_center()
for type3_i,type3_cell in enumerate(self.bc_type3): # each edge/cell
ds['cellp'].values[type3_i]=type3_cell
ds['xv'].values[type3_i]=cc[type3_cell,0]
ds['yv'].values[type3_i]=cc[type3_cell,1]
bc=self.bc_type3[type3_cell]
for v in bc.keys(): # each variable on that edge/cell
if v=='h':
offset=self.z_offset
else:
offset=0
# will set bc values in place
combine_items(ds[v].isel(Ntype3=type3_i).values,
bc[v],
offset=offset)
Ntype2=len(self.bc_type2)
Nseg=len(self.bc_type2_segments)
ds['edgep']=('Ntype2',),np.zeros(Ntype2,np.int32)-1
ds['xe']=('Ntype2',),np.zeros(Ntype2,np.float64)
ds['ye']=('Ntype2',),np.zeros(Ntype2,np.float64)
ds['boundary_h']=('Nt','Ntype2'),np.zeros( (Nt, Ntype2), np.float64) + self.z_offset
ds['boundary_u']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_v']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_w']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_T']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_S']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_Q']=('Nt','Nseg'),np.zeros( (Nt, Nseg), np.float64)
# Iterate over segments first, so that edges below can grab the correct
# index.
segment_names=list(self.bc_type2_segments.keys()) # this establishes the order of the segments
# make this distinct from 0 or 1 to aid debugging
segment_ids=100 + np.arange(len(segment_names))
ds['seg_name']=('Nseg',),segment_names # not read by suntans, but maybe helps debugging
ds['segedgep']=('Ntype2',),np.zeros(Ntype2,np.int32)-1
ds['segp']=('Nseg',),segment_ids # np.arange(Nseg,dtype=np.int32)
for seg_i,seg_name in enumerate(segment_names):
bc=self.bc_type2_segments[seg_name]
for v in bc.keys(): # only Q, but stick to the same pattern
combine_items(ds['boundary_'+v].isel(Nseg=seg_i).values,
bc[v])
ec=self.grid.edges_center()
for type2_i,type2_edge in enumerate(self.bc_type2): # each edge
ds['edgep'].values[type2_i]=type2_edge
ds['xe'].values[type2_i]=ec[type2_edge,0]
ds['ye'].values[type2_i]=ec[type2_edge,1]
bc=self.bc_type2[type2_edge]
for v in bc.keys(): # each variable on that edge/cell
if v=='h':
offset=self.z_offset
else:
offset=0.0
if v!='Q':
combine_items(ds['boundary_'+v].isel(Ntype2=type2_i).values,
bc[v],offset)
else:
seg_name=bc[v]
# too lazy to work through the right way to deal with combined
# bcs for Q right now, so just warn the user that it may be
# a problem.
if len(seg_name)!=1:
log.warning("Only tested with a single value, but got %s"%str(seg_name))
seg_name=seg_name[0]
seg_idx=segment_ids[segment_names.index(seg_name)]
ds['segedgep'].values[type2_i] = seg_idx
# -- Set grid marks --
for c in ds.cellp.values:
assert c>=0
for j in self.grid.cell_to_edges(c):
j_cells=self.grid.edge_to_cells(j)
if j_cells.min()<0: # boundary
self.grid.edges['mark'][j]=3 # set to type 3
for j in ds.edgep.values:
assert j>=0,"Some edge pointers did not get set"
self.grid.edges['mark'][j]=2
# --- Point source code ---
Npoint=len(self.bc_point_sources)
ds['point_cell']=('Npoint',), np.zeros(Npoint,np.int32) # point_cell
ds['point_layer']=('Npoint',), np.zeros(Npoint,np.int32) # point_layer
ds['point_Q']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64) # np.stack(point_Q,axis=-1)
ds['point_S']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64) # np.stack(point_S,axis=-1)
ds['point_T']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64) # np.stack(point_T,axis=-1)
for pnt_idx,key in enumerate(self.bc_point_sources.keys()):
(c,k)=key
log.info("Point source for cell=%d, k=%d"%(c,k))
assert 'Q' in self.bc_point_sources[key]
combine_items(ds['point_Q'].isel(Npoint=pnt_idx).values,
self.bc_point_sources[key]['Q'])
ds['point_cell'].values[pnt_idx]=c
ds['point_layer'].values[pnt_idx]=k
# really shaky ground here..
if 'T' in self.bc_point_sources[key]:
combine_items( ds['point_T'].isel(Npoint=pnt_idx).values,
self.bc_point_sources[key]['T'] )
if 'S' in self.bc_point_sources[key]:
combine_items( ds['point_S'].isel(Npoint=pnt_idx).values,
self.bc_point_sources[key]['S'] )
# End new point source code
log.info("Total time in interp_time: %.3fs"%elapsed[0])
return ds
def write_bc(self,bc):
if isinstance(bc,hm.StageBC):
self.write_stage_bc(bc)
elif isinstance(bc,hm.SourceSinkBC):
self.write_source_sink_bc(bc)
elif isinstance(bc,hm.FlowBC):
self.write_flow_bc(bc)
elif isinstance(bc,hm.VelocityBC):
self.write_velocity_bc(bc)
elif isinstance(bc,hm.ScalarBC):
self.write_scalar_bc(bc)
else:
super(SuntansModel,self).write_bc(bc)
def write_stage_bc(self,bc):
water_level=bc.data()
assert len(water_level.dims)<=1,"Water level must have dims either time, or none"
cells=bc.grid_cells or self.bc_geom_to_cells(bc.geom)
for cell in cells:
self.bc_type3[cell]['h'].append(water_level)
def write_velocity_bc(self,bc):
# interface isn't exactly nailed down with the BC
# classes. whether the model wants vector velocity
# or normal velocity varies by model. could
# standardize on vector velocity, and project to normal
# here?
ds=bc.dataset()
edges=bc.grid_edges or self.bc_geom_to_edges(bc.geom)
for j in edges:
for comp in ['u','v']:
da=ds[comp]
da.attrs['mode']=bc.mode
self.bc_type2[j][comp].append(da)
def write_scalar_bc(self,bc):
da=bc.data()
scalar_name=bc.scalar
# canonicalize scalar names for suntans BC files
if scalar_name.lower() in ['salinity','salt','s']:
scalar_name='S'
elif scalar_name.lower() in ['temp','t','temperature']:
scalar_name='T'
else:
self.log.warning("Scalar %s is not S or T or similar"%scalar_name)
# scalars could be set on edges or cells, or points in cells
# this should be expanded to make more use of the information in bc.parent
# if that is set
if bc.geom.type=='Point':
self.log.info("Assuming that Point geometry on a scalar bc implies point source")
ck=self.bc_to_interior_cell_layer(bc) # (cell,layer) tuple
self.bc_point_sources[ck][scalar_name].append(da)
else:
# info is duplicated on type2 (flow) and type3 (stage) BCs, which
# is sorted out later.
for j in (bc.grid_edges or self.bc_geom_to_edges(bc.geom)):
self.bc_type2[j][scalar_name].append(da)
for cell in (bc.grid_cells or self.bc_geom_to_cells(bc.geom)):
self.bc_type3[cell][scalar_name].append(da)
def dredge_boundary(self,linestring,dredge_depth):
# Restarts appear to be making dredge calls. Not sure why.
log.debug("Call to dredge_boundary, restart is %s"%self.restart)
return super(SuntansModel,self).dredge_boundary(linestring,dredge_depth,
edge_field='edge_z_bed',
cell_field='z_bed')
def dredge_discharge(self,point,dredge_depth):
print("Call to dredge discharge, restart is",self.restart)
return super(SuntansModel,self).dredge_discharge(point,dredge_depth,
edge_field='edge_z_bed',
cell_field='z_bed')
def write_flow_bc(self,bc):
da=bc.data()
self.bc_type2_segments[bc.name]['Q'].append(da)
assert len(da.dims)<=1,"Flow must have dims either time, or none"
if (bc.dredge_depth is not None) and (self.restart is None):
log.info("Dredging grid for flow boundary %s"%bc.name)
self.dredge_boundary(np.array(bc.geom.coords),
bc.dredge_depth)
edges=(bc.grid_edges or self.bc_geom_to_edges(bc.geom))
for j in edges:
self.bc_type2[j]['Q'].append(bc.name)
def write_source_sink_bc(self,bc):
da=bc.data()
assert bc.geom.type=='Point',"Suntans driver does not support src/sink pair"
if (bc.dredge_depth is not None) and (self.restart is None):
# Additionally modify the grid to make sure there is a place for inflow to
# come in.
log.info("Dredging grid for source/sink BC %s"%bc.name)
self.dredge_discharge(np.array(bc.geom.coords),
bc.dredge_depth)
ck=self.bc_to_interior_cell_layer(bc) # (cell,layer) tuple
self.bc_point_sources[ck]['Q'].append(da)
assert len(da.dims)<=1,"Flow must have dims either time, or none"
def bc_geom_to_cells(self,geom):
""" geom: a LineString geometry. Return the list of cells interior
to the linestring
"""
cells=[]
for j in self.bc_geom_to_edges(geom):
j_cells=self.grid.edge_to_cells(j)
assert j_cells.min()<0
assert j_cells.max()>=0
cells.append(j_cells.max())
return cells
def bc_to_interior_cell_layer(self,bc):
"""
Determine the cell and layer for a source/sink BC.
"""
# TODO: use bc.z, which is either an elevation or a 'bed'
# to choose the layer
c=self.bc_geom_to_interior_cell(bc.geom)
self.log.warning("Assuming source/sink is at bed")
k=int(self.config['Nkmax'])-1
return (c,k)
def bc_geom_to_interior_cell(self,geom):
""" geom: a Point or LineString geometry. In the case of a LineString,
only the first point is used.
return the index of the cell that the point or linestring node falls in
"""
coords=np.array(geom)
if coords.ndim==2:
coords=coords[0]
c=self.grid.select_cells_nearest(coords,inside=True)
assert c is not None,"%s did not match any cells. LineString may be reversed?"%str(coords)
return c
def bc_geom_to_edges(self,geom):
"""
geom: LineString geometry
return list of boundary edges adjacent to geom.
"""
return self.grid.select_edges_by_polyline(geom,update_e2c=False)
def set_times_from_config(self):
"""
Pull run_start,run_stop from a loaded config file.
"""
if self.restart:
start_files=self.start_inputs()
if start_files:
start=store_file.StoreFile(model=self,proc=0,filename=start_files[0])
self.run_start=start.time()
else:
# maybe we're constructing a restart? sequencing of this stuff,
# and the exact state of the model is quirky and under-designed
self.run_start=self.restart_model.restartable_time()
log.debug("Inferred start time of restart to be %s"%self.run_start)
else:
start_dt=datetime.datetime.strptime(self.config['starttime'],'%Y%m%d.%H%M%S')
self.run_start=utils.to_dt64(start_dt)
nsteps=int(self.config['nsteps'])
dt=np.timedelta64(1,'us') * int(1e6*float(self.config['dt']))
self.run_stop=self.run_start + nsteps*dt
def update_config(self):
assert self.config is not None,"Only support starting from template"
# Have to be careful about the difference between starttime,
# which reflects time0, and the start of the initial run,
# vs. run_start, which is when this simulation will begin
# (possibly restarting a prior simulation)
start_dt=utils.to_datetime(self.run_start)
end_dt=utils.to_datetime(self.run_stop)
# In the case of restarts, this needs to reflect the
# start of the first simulation, not a later restart.
if not self.restart:
self.config['starttime']=start_dt.strftime('%Y%m%d.%H%M%S')
else:
log.info("starttime pulled from previous run: %s"%self.config['starttime'])
restart_time=self.restart_model.restartable_time()
assert self.run_start==restart_time,"Configured sim start and restart timestamp don't match"
dt=np.timedelta64(1,'us') * int(1e6*float(self.config['dt']))
# timedelta division yields a float; round to an integer count of steps
nsteps=int(round((self.run_stop-self.run_start)/dt))
log.info("Number of steps in this simulation: %d"%nsteps)
self.config['nsteps']=nsteps
max_faces=self.grid.max_sides
if int(self.config['maxFaces']) < max_faces:
log.debug("Increasing maxFaces to %d"%max_faces)
self.config['maxFaces']=max_faces
if self.coriolis_f=='auto':
if self.projection is None:
log.warning("No projection and coriolis_f is 'auto'. No coriolis!")
self.config['Coriolis_f']=0.0
else:
xy_ctr=self.grid.nodes['x'].mean(axis=0)
ll_ctr=self.native_to_ll(xy_ctr)
lat=ll_ctr[1]
# f=2*Omega*sin(phi)
Omega=7.2921e-5 # rad/s
f=2*Omega*np.sin(lat*np.pi/180.)
self.config['Coriolis_f']="%.5e"%f
log.debug("Using %.2f as latitude for Coriolis => f=%s"%(lat,self.config['Coriolis_f']))
elif self.coriolis_f is not None:
self.config['Coriolis_f']=self.coriolis_f
if len(self.mon_points):
self.config['numInterpPoints']=1
self.config['DataLocations']='profile_locs.dat'
self.config['NkmaxProfs']=0 # all layers
self.config['ProfileDataFile']="profdata.dat"
# could also make sure that config['ProfileVariables'] has a default like 'hu'
# and ntoutProfs has a reasonable value.
def restart_copier(self,src,dst,on_exists='replace_link'):
"""
src: source file for copy, relative to present working dir
dst: destination.
will either symlink or copy src to dst, based on self.restart_symlink
setting
In order to avoid a limit on chained symlinks, symlinks will point to
the original file.
"""
if os.path.lexists(dst):
# Allow replacing symlinks, but if dst is a real file, bail out
# to avoid anything sketchy
if on_exists=='replace_link' and os.path.islink(dst):
os.unlink(dst)
elif on_exists=='replace':
os.unlink(dst)
elif on_exists=='fail':
raise Exception("Restart copier %s=>%s found destination already exists. on_exists='fail'"%(src,dst))
else:
raise Exception("Unknown option for on_exists: %s"%on_exists)
if self.restart_symlink:
# this ensures that we don't build up long chains of
# symlinks
src=os.path.realpath(src)
src_rel=os.path.relpath(src,self.run_dir)
os.symlink(src_rel,dst)
else:
shutil.copyfile(src,dst)
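# Usage sketch (hypothetical paths): with self.restart_symlink True,
#   model.restart_copier('../parent/store.dat.0','store.dat.0')
# resolves the source via realpath first, so chained restarts always link
# back to the original file rather than stacking symlinks.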
def write_grid(self):
if not self.restart:
# Write a grid that suntans will read:
self.grid.write_suntans_hybrid(self.run_dir,overwrite=True,z_offset=self.z_offset)
self.write_grid_bathy()
else:
parent_base=os.path.dirname(self.restart)
for fn in ['cells.dat','edges.dat',
'points.dat','depths.dat-voro']:
self.restart_copier(os.path.join(parent_base,fn),
os.path.join(self.run_dir,fn))
def write_grid_bathy(self):
# And write cell bathymetry separately
# This filename is hardcoded into suntans, not part of
# the settings in suntans.dat (maybe it can be overridden?)
cell_depth_fn=os.path.join(self.run_dir,"depths.dat-voro")
cell_xy=self.grid.cells_center()
# make depth positive down
z=-(self.grid.cells['z_bed'] + self.z_offset)
min_depth=float(self.config['minimum_depth'])
shallow=z<min_depth
if np.any(shallow):
log.warning("%d of %d cell depths extend above z=0 even with offset of %.2f"%(np.sum(shallow),
len(shallow),
self.z_offset))
z=z.clip(min_depth,np.inf)
cell_xyz=np.c_[cell_xy,z]
np.savetxt(cell_depth_fn,cell_xyz) # space separated
if self.use_edge_depths:
# And edges, preparing for edge-based bathy
edge_depth_fn=os.path.join(self.run_dir,"depths.dat-edge")
edge_xy=self.grid.edges_center()
# make depth positive down
edge_depth=-(self.grid.edges['edge_z_bed'] + self.z_offset)
edge_xyz=np.c_[edge_xy,edge_depth]
np.savetxt(edge_depth_fn,edge_xyz) # space separated
def grid_as_dataset(self):
"""
Return the grid and vertical geometry in a xr.Dataset
following the naming of suntans/ugrid.
Note that this does not yet set all attributes -- TODO!
This method does apply z_offset to the grid.
"""
ds=self.grid.write_to_xarray()
ds=ds.rename({'face':'Nc',
'edge':'Ne',
'node':'Np',
'node_per_edge':'two',
'maxnode_per_face':'numsides'})
layers=self.layer_data()
z_min=layers.z_min.values
z_max=layers.z_max.values
Nk=layers.dims['Nk']
cc=self.grid.cells_center()
ds['xv']=('Nc',),cc[:,0]
ds['yv']=('Nc',),cc[:,1]
ds['z_r']=('Nk',),layers.z_mid.values + self.z_offset
ds['z_r'].attrs['positive']='down'
# not right for 3D..
ds['Nk']=('Nc',),Nk*np.ones(self.grid.Ncells(),np.int32)
# don't take any chances on ugrid assumptions -- exactly mimic
# the example:
ds['suntans_mesh']=(),0
ds.suntans_mesh.attrs.update( dict(cf_role='mesh_topology',
long_name='Topology data of 2D unstructured mesh',
topology_dimension=2,
node_coordinates="xp yp",
face_node_connectivity="cells",
edge_node_connectivity="edges",
face_coordinates="xv yv",
edge_coordinates="xe ye",
face_edge_connectivity="face",
edge_face_connectivity="grad") )
ds['cells']=('Nc','numsides'),self.grid.cells['nodes']
ds['nfaces']=('Nc',), [self.grid.cell_Nsides(c) for c in range(self.grid.Ncells())]
ds['edges']=('Ne','two'),self.grid.edges['nodes']
ds['neigh']=('Nc','numsides'), [self.grid.cell_to_cells(c,pad=True)
for c in range(self.grid.Ncells())]
ds['grad']=('Ne','two'),self.grid.edge_to_cells()
ds['xp']=('Np',),self.grid.nodes['x'][:,0]
ds['yp']=('Np',),self.grid.nodes['x'][:,1]
depth=-(self.grid.cells['z_bed'] + self.z_offset)
ds['dv']=('Nc',),depth.clip(float(self.config['minimum_depth']),np.inf)
# really ought to set attrs for everybody, but sign of depth is
# particular, so go ahead and do it here.
ds.dv.attrs.update( dict( standard_name='sea_floor_depth_below_geoid',
long_name='seafloor depth',
comment='Has offset of %.3f applied'%(-self.z_offset),
units='m',
mesh='suntans_mesh',
location='face',
positive='down') )
ds['dz']=('Nk',),-np.diff(layers.z_interface.values)
ds['mark']=('Ne',),self.grid.edges['mark']
return ds
def zero_initial_condition(self):
"""
Return a xr.Dataset for initial conditions, with all values
initialized to nominal zero values.
This dataset has z_offset applied.
"""
ds_ic=self.grid_as_dataset()
ds_ic['time']=('time',),[self.run_start]
for name,dims in [ ('eta',('time','Nc')),
('uc', ('time','Nk','Nc')),
('vc', ('time','Nk','Nc')),
('salt',('time','Nk','Nc')),
('temp',('time','Nk','Nc')),
('agec',('time','Nk','Nc')),
('agesource',('Nk','Nc')) ]:
shape=tuple( [ds_ic.dims[d] for d in dims] )
if name=='agealpha':
# note: 'agealpha' is not in the list above, so this branch is
# currently unreachable; retained for when age tracking is added.
dtype=np.timedelta64
else:
dtype=np.float64
vals=np.zeros(shape,dtype)
if name=='eta':
vals += self.z_offset
ds_ic[name]=dims,vals
return ds_ic
met_pad=np.timedelta64(1,'D')
def zero_met(self,times=None):
"""
Create an empty (zero valued, and T=20degC) dataset for met
forcing.
times: defaults to 4 time steps bracketing the run, pass in
other ndarray(datetime64) to override
"""
ds_met=xr.Dataset()
# the time dimension must be named 'nt' to match what suntans expects
# (using 'time' does not work).
# quadratic interpolation is used, so we need to pad out before/after
# the simulation
if times is None:
times=[self.run_start-self.met_pad,
self.run_start,
self.run_stop,
self.run_stop+self.met_pad]
ds_met['nt']=('nt',),times
ds_met['Time']=('nt',),ds_met.nt.values
xxyy=self.grid.bounds()
xy0=[ 0.5*(xxyy[0]+xxyy[1]), 0.5*(xxyy[2]+xxyy[3])]
ll0=self.native_to_ll(xy0)
for name in ['Uwind','Vwind','Tair','Pair','RH','rain','cloud']:
ds_met["x_"+name]=("N"+name,),[ll0[0]]
ds_met["y_"+name]=("N"+name,),[ll0[1]]
ds_met["z_"+name]=("N"+name,),[10]
def const(dims,val):
shape=tuple( [ds_met.dims[d] for d in dims] )
return dims,val*np.ones(shape)
ds_met['Uwind']=const(('nt','NUwind'), 0.0)
ds_met['Vwind']=const(('nt','NVwind'), 0.0)
ds_met['Tair'] =const(('nt','NTair'), 20.0)
ds_met['Pair'] =const(('nt','NPair'), 1000.) # units?
ds_met['RH']=const(('nt','NRH'), 80.)
ds_met['rain']=const(('nt','Nrain'), 0.)
ds_met['cloud']=const(('nt','Ncloud'), 0.5)
return ds_met
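# Hedged example: overriding the default 4-point bracket with an hourly
# series (the times array here is hypothetical):
#   times=np.arange(model.run_start-model.met_pad,
#                   model.run_stop+model.met_pad,
#                   np.timedelta64(1,'h'))
#   model.met_ds=model.zero_met(times=times)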
def partition(self):
if self.restart:
# per-processor files, listed without the .<proc> suffix; each will be
# symlinked or copied below
parent_base=os.path.dirname(self.restart)
multi_proc_files=['celldata.dat','cells.dat',
'edgedata.dat','edges.dat',
'nodes.dat','topology.dat']
if os.path.exists(os.path.join(parent_base,'depths.dat-edge.0')):
multi_proc_files.append('depths.dat-edge')
for fn_base in multi_proc_files:
for proc in range(self.num_procs):
fn=fn_base+".%d"%proc
self.restart_copier(os.path.join(parent_base,fn),
os.path.join(self.run_dir,fn))
# single files
single_files=['vertspace.dat']
if 'DataLocations' in self.config:
# UNTESTED
single_files.append(self.config['DataLocations'])
if 'ProfileDataFile' in self.config:
# UNTESTED
single_files.append(self.config['ProfileDataFile'])
for fn in single_files:
self.restart_copier(os.path.join(parent_base,fn),
os.path.join(self.run_dir,fn))
else:
self.run_mpi(["-g",self.sun_verbose_flag,"--datadir=%s"%self.run_dir])
sun_verbose_flag="-vv"
def run_simulation(self):
args=['-s']
if self.restart:
args.append("-r")
args+=[self.sun_verbose_flag,"--datadir=%s"%self.run_dir]
self.run_mpi(args)
def run_mpi(self,sun_args):
sun="sun"
if self.sun_bin_dir is not None:
sun=os.path.join(self.sun_bin_dir,sun)
cmd=[sun] + sun_args
if self.num_procs>1:
mpiexec="mpiexec"
if self.mpi_bin_dir is not None:
mpiexec=os.path.join(self.mpi_bin_dir,mpiexec)
cmd=[mpiexec,"-n","%d"%self.num_procs] + cmd
subprocess.call(cmd)
# Methods related to using model output
def restartable_time(self):
"""
If store output is enabled, and this run has already been
executed, return the datetime64 of the restart files.
Otherwise None
"""
store_files=self.store_outputs()
if not store_files:
return None
store=store_file.StoreFile(model=self,proc=0,filename=store_files[0])
return store.time()
def store_outputs(self):
store_fn=os.path.join(self.run_dir,self.config['StoreFile'])
fns=glob.glob( store_fn+"*" )
fns.sort()
return fns
def start_inputs(self):
start_fn=os.path.join(self.run_dir,self.config['StartFile'])
fns=glob.glob( start_fn+"*" )
fns.sort()
return fns
def avg_outputs(self):
"""
with mergeArrays=1, these get sequenced with nstepsperncfile
with mergeArrays=0, each processor gets a file.
currently this function does not expose the difference
"""
if int(self.config['calcaverage']):
fns=glob.glob(os.path.join(self.run_dir,self.config['averageNetcdfFile']+"*"))
fns.sort()
return fns
else:
return []
def map_outputs(self):
"""
return a list of map output files -- if netcdf output is enabled,
that is what will be returned.
Guaranteed to be in the order of subdomain numbering if mergeArrays=0,
and in chronological order if mergeArrays=1.
Currently you can't distinguish which is which just from the output
of this method.
"""
if int(self.config['outputNetcdf']):
if self.config['mergeArrays'] is None or int(self.config['mergeArrays']):
# in this case the outputs are chunked in time
# with names like Estuary_SUNTANS.nc_0000.nc
# i.e. <outputNetcdfFile>_<seqN>.nc
fns=glob.glob(os.path.join(self.run_dir,self.config['outputNetcdfFile']+"_*.nc"))
fns.sort()
return fns
else:
# convoluted, but allow for some of the odd name construction for
# per-domain files, relying only on the assumption that the
# suffix is the processor number.
fns=glob.glob(os.path.join(self.run_dir,self.config['outputNetcdfFile']+"*"))
procs=[int(fn.split('.')[-1]) for fn in fns]
order=np.argsort(procs)
fns=[fns[i] for i in order]
return fns
else:
raise Exception("Need to implement map output filenames for non-netcdf")
@classmethod
def parse_profdata(cls,fn):
"""
Parse the profdata.dat file associated with a run.
fn: path to file to parse.
This is a classmethod to allow external usage but keep it bundled with the
SunDriver class.
Returns an xarray dataset
NOTE: if this uses caching at some point in the future, monitor_output should
be adapted to make a copy since it mutates the dataset.
data format:
(4 byte int)numTotalDataPoints: Number of data points found on all processors. Note that
that this could be different from the number specified since some may lie outside the domain.
(4 byte int)numInterpPoints: Number of nearest neighbors to each point used for interpolation.
(4 byte int)NkmaxProfs: Number of vertical levels output in the profiles.
(4 byte int)nsteps: Total number of time steps in the simulation.
(4 byte int)ntoutProfs: Frequency of profile output. This implies a total of nsteps/ntoutProfs are output.
(8 byte double)dt: Time step size
(8 byte double array X NkmaxProfs)dz: Contains the vertical grid spacings.
(4 byte int array X numTotalDataPoints)allIndices: Contains the indices of each point that determines its
original location in the data file. This is mostly for debugging since the output data is resorted
so that it is in the same order as it appeared in the data file.
(4 byte int array X 2*numTotalDataPoints)dataXY: Contains the original data points at (or near) which profiles
are output.
(8 byte double array X numTotalDataPoints*numInterpPoints)xv: Array containing the x-locations of the nearest
neighbors to the dataXY points. If numInterpPoints=3, then the 3 closest neighbors to the point
(dataXY[2*i],dataXY[2*i+1]) are (xv[3*i],yv[3*i]), (xv[3*i+1],yv[3*i+1]), (xv[3*i+2],yv[3*i+2]).
(8 byte double array X numTotalDataPoints*numInterpPoints)yv: Array containing the y-locations of the nearest
neighbors to the dataXY points (see xv above).
"""
pdata=xr.Dataset()
with open(fn,'rb') as fp:
hdr_ints = np.fromfile(fp,np.int32,count=5)
pdata['num_total_data_points']=(),hdr_ints[0]
pdata['num_interp_points'] =(), hdr_ints[1]
pdata['nkmax_profs'] =(), hdr_ints[2]
pdata['nsteps'] =(), hdr_ints[3]
pdata['ntout_profs'] =(), hdr_ints[4]
pdata['dt'] =(), np.fromfile(fp,np.float64,1)[0]
pdata['dzz'] = ('layer',),np.fromfile(fp,np.float64,pdata['nkmax_profs'].item() )
# give the array explicit dims -- xarray can't take a bare ndarray here
pdata['all_indices'] = ('request',),np.fromfile(fp,np.int32,pdata['num_total_data_points'].item())
dataxy = np.fromfile(fp,np.float64,2*pdata['num_total_data_points'].item())
pdata['request_xy'] =('request','xy'), dataxy.reshape( (-1,2) )
pdata['request_xy'].attrs['description']="Coordinates of the requested profiles"
xvyv = np.fromfile(fp,np.float64,2*(pdata['num_total_data_points']*pdata['num_interp_points']).item())
pdata['prof_xy'] =('profile','xy'), xvyv.reshape( (2,-1) ).transpose()
pdata['prof_xy'].attrs['description']="Coordinates of the output profiles"
return pdata
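# Usage sketch (hypothetical path): as a classmethod this can also be called
# without a full model instance:
#   pdata=SuntansModel.parse_profdata('runs/sun001/profdata.dat')
#   pdata.prof_xy.values  # (Nprofiles,2) coordinates of output profiles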
def read_profile_data_raw(self,scalar,pdata=None,memmap=True):
"""
scalar is one of HorizontalVelocityFile,
FreeSurfaceFile, etc
pdata: a previously parsed ProfData file, from parse_profdata. Can be passed
in to avoid re-parsing this file.
memmap: by default the file is memory mapped, which can be a huge performance
savings for large files. In some cases and platforms it is less stable,
though.
"""
if pdata is None:
pdata=self.parse_profdata(self.file_path('ProfileDataFile'))
prof_pnts = pdata.prof_xy
prof_len = prof_pnts.shape[0]
prof_fname = self.file_path(scalar) + ".prof"
if not os.path.exists(prof_fname):
log.debug("Request for profile for %s, but %s does not exist"%(scalar,prof_fname))
return None
# Figure out the shape of the output:
# I'm assuming that profile data gets spat out in the same
# ordering of dimensions as regular grid-based data
shape_per_step = []
# profiles.c writes u first then v, then w, each with a
# separate call to Write3DData()
if scalar == 'HorizontalVelocityFile':
shape_per_step.append(3)
# the outer loop is over profile points
shape_per_step.append(prof_len)
# And does it have z-levels? if so, that is the inner-most
# loop, so the last dimension of the array
if scalar != 'FreeSurfaceFile':
nkmax_profs = pdata['nkmax_profs'].item()
shape_per_step.append(nkmax_profs)
# better to use the size of the specific file we're opening:
prof_dat_size=os.stat(prof_fname).st_size
REALSIZE=8
bytes_per_step = REALSIZE * np.prod( np.array(shape_per_step) )
n_steps_in_file=int(prof_dat_size//bytes_per_step )
final_shape = tuple([n_steps_in_file] + shape_per_step)
if memmap: # BRAVE!
# print "Trying to memory map the data.."
data = np.memmap(prof_fname, dtype=np.float64, mode='r', shape=final_shape)
else:
data = np.fromfile(prof_fname,np.float64)
data = data.reshape(*final_shape)
# no caching at this point..
return data
monitor_nodata=999999
monitor_dv=None # caches dv_from_map results
def monitor_output(self,nan_nodata=False,dv_from_map=False):
"""
Return xarray Dataset including the monitor output
"""
if 'DataLocations' not in self.config: return None
pdata=self.parse_profdata(self.file_path('ProfileDataFile'))
file_to_var={'FreeSurfaceFile':'eta',
'HorizontalVelocityFile':'u',
'TemperatureFile':'temp',
'SalinityFile':'salt',
'EddyViscosityFile':'nut',
'VerticalVelocityFile':'w',
'ScalarDiffusivityFile':'kappa'}
# Try to figure out which variables have been output in profiles
# Just scan what's there, to avoid trying to figure out defaults.
for scalar in list(file_to_var.keys()):
raw_data=self.read_profile_data_raw(scalar,pdata=pdata)
if raw_data is not None:
if scalar=='FreeSurfaceFile':
dims=('time','profile')
elif scalar=='HorizontalVelocityFile':
dims=('time','xyz','profile','layer')
else:
dims=('time','profile','layer')
# May need to use a different layer dimension for w...
# print("%s: raw data shape: %s dims: %s"%(scalar,str(raw_data.shape),dims))
if nan_nodata and np.any(raw_data==self.monitor_nodata):
# this can significantly slow down the process if ultimately we're
# only going to use a small slice of the data
raw_data=np.where(raw_data==self.monitor_nodata,
np.nan,raw_data)
pdata[file_to_var[scalar]]=dims,raw_data
# This may need some tweaking, but it's a start.
# use microseconds to get some reasonable precision for fraction dt
# but note that this isn't necessarily exact.
dt_prof=np.timedelta64( int( pdata['ntout_profs']*pdata['dt']*1e6),'us')
pdata['time']=('time',),(self.run_start + dt_prof*np.arange(pdata.dims['time']))
if dv_from_map:
if self.monitor_dv is None:
if 0: # read from map file, but that may not be valid until end of run
print("Loading dv for monitor data - should happen once!")
self.monitor_dv=self.extract_station_map(xy=pdata.prof_xy.values[:,:],data_vars='dv')
else: # read from subdomain grids.
mon_dv=np.zeros(pdata.dims['profile'],np.float64)
mon_dv[:]=np.nan
for proc in range(self.num_procs):
gsub=self.subdomain_grid(proc)
for i,xy in enumerate(pdata.prof_xy.values):
c=gsub.select_cells_nearest(xy,inside=True)
if c is not None:
mon_dv[i]=gsub.cells[c]['dv']
assert np.all(np.isfinite(mon_dv)),"Failed to get depths for all profile locations"
self.monitor_dv=xr.Dataset()
self.monitor_dv['dv']=('profile',),mon_dv
pdata['dv']=('profile',),self.monitor_dv['dv'].values
# Total hack for convenience -- add a closest_to([x,y]) method to extract a single
# profile.
@utils.add_to(pdata)
def closest_to(self,target):
dists=utils.dist(target,self['prof_xy'].values)
idx=np.argmin(dists)
return self.isel(profile=idx)
return pdata
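# Hedged example (coordinates are made up; requires profile output enabled):
#   mon=model.monitor_output(nan_nodata=True)
#   stn=mon.closest_to([551000.,4185000.])  # nearest profile to this x,y
#   stn['eta']  # free-surface time series, if FreeSurfaceFile was output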
_subdomain_grids=None
def subdomain_grid(self,p):
if self._subdomain_grids is None:
self._subdomain_grids={}
if p not in self._subdomain_grids:
sub_g=unstructured_grid.UnstructuredGrid.read_suntans_hybrid(path=self.run_dir,
points='points.dat',
edges='edges.dat.%d'%p,
cells='cells.dat.%d'%p)
# edge depth is an ad-hoc extension, not "standard" enough to be in
# read_suntans_hybrid, so add it in here:
edge_depth_fn=self.file_path('depth')+"-edge.%d"%p
if os.path.exists(edge_depth_fn):
edge_xyz=np.loadtxt(edge_depth_fn)
# 2019-05-29: this did not have a negation. probably that was wrong.
# transition away from edge_depth, anyway.
# sub_g.add_edge_field('edge_depth',edge_xyz[:,2])
sub_g.add_edge_field('edge_z_bed',-edge_xyz[:,2])
if ('dv' in sub_g.cells.dtype.names) and ('z_bed' not in sub_g.cells.dtype.names):
sub_g.add_cell_field('z_bed',-sub_g.cells['dv'])
self._subdomain_grids[p]=sub_g
return self._subdomain_grids[p]
@memoize.imemoize(lru=64)
def extract_transect_monitor(self,xy=None,ll=None,time=None,
time_mode='inner',dv_from_map=False,
dzmin_surface=None):
"""
In progress alternate approach for transects.
xy: [N,2] location of vertical profiles making up the transect
ll: like xy, but lon/lat to be converted via self.ll_to_native
time: can be used to pull a specific time for each xy (with time_mode='inner').
time_mode: for now, only 'inner'. May be expanded to control whether
time is used orthogonal to xy, or parallel (i.e. for each xy, do we pull
one corresponding time from time, or pull all of the time for each).
if time is not covered by the output, or the run has no monitor output,
will return None.
"""
if xy is None:
xy=self.ll_to_native(ll)
if time_mode=='outer':
assert time.ndim==0,"Not ready for array-valued time with time_mode='outer'"
def xyt():
if time_mode=='inner':
for loc,t in zip(xy,time):
yield loc,t
else:
for loc in xy:
yield loc,time
stns=[]
for loc,t in xyt():
if time_mode=='inner':
# then each location has a single time associated with it
# we can narrow extract_station in that case.
t_slice=(t,t)
else:
# potentially a range of times
# this should also a work when time is a scalar datetime64.
t_slice=(t.min(),t.max())
stn=self.extract_station_monitor(xy=loc,chain_count=t_slice,
dv_from_map=dv_from_map)
if stn is None:
log.warning('Found no monitor data for %s. Skip transect'%str(t_slice))
return None
if np.isscalar(t):
if (t<stn.time.values[0]) or (t>stn.time.values[-1]):
log.info("Requested time %s is outside the range of the model output"%t)
return None
ti=utils.nearest(stn.time.values,t)
stn=stn.isel(time=ti)
stns.append(stn)
tran=xr.concat(stns,dim='time')
# now cleanup nan/nodata
for v in tran.data_vars:
if not np.issubdtype(tran[v].dtype,np.floating): continue
missing=tran[v].values==self.monitor_nodata
tran[v].values[missing]=np.nan
xy=np.c_[ tran.station_x,tran.station_y ]
tran=tran.rename(time='sample')
tran['d_sample']=('sample',),utils.dist_along(xy)
if 'dzz' in tran:
assert 'eta' in tran,"Not ready for transect processing without eta"
dzz_2d,eta_2d=xr.broadcast(tran.dzz,tran.eta)
z_max=eta_2d
#Not ready for this.
if 'dv' in tran:
_,dv_2d=xr.broadcast(eta_2d,tran.dv)
z_min=-dv_2d
else:
z_min=-np.inf
tran['z_bot']=-dzz_2d.cumsum(dim='layer')
tran['z_top']=tran.z_bot+dzz_2d
tran['z_bot']=tran.z_bot.clip(z_min,z_max)
tran['z_top']=tran.z_top.clip(z_min,z_max)
tran['z_ctr']=0.5*(tran.z_bot+tran.z_top)
for fld in ['z_bot','z_top','z_ctr']:
tran[fld].attrs['positive']='up'
# to be consistent with xr_transect, and np.diff(z_ctr),
# z_dz is _negative_
tran['z_dz'] =(tran.z_bot-tran.z_top)
if dzmin_surface is not None:
self.adjust_transect_for_dzmin_surface(tran,dzmin_surf=dzmin_surface)
return tran
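# Hedged example (points and time are made up; note the memoize wrapper may
# require hashable arguments, e.g. tuples rather than arrays):
#   xy=((550000,4185000),(550500,4185200),(551000,4185400))
#   t=np.repeat(np.datetime64('2017-06-15T12:00'),len(xy))
#   tran=model.extract_transect_monitor(xy=xy,time=t,dzmin_surface=0.25)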
def adjust_transect_for_dzmin_surface(self,tran,update_vars=['salt','temp'],dzmin_surf=0.25):
"""
janky - it is not always clear in the output which layers are valid, versus when a layer
was really thin and was coalesced with the next layer down. This method
takes an xr_transect style transect, finds thin surface layers and copies the values from
lower down up to the surface cells.
This currently probably doesn't work for velocity, just scalar.
extract_transect_monitor will call this automatically if dzmin_surface is specified.
"""
from ... import xr_transect
z_dz=xr_transect.get_z_dz(tran)
for samp_i in range(tran.dims['sample']):
eta=tran.eta.isel(sample=samp_i)
k_update=[]
for k in range(tran.dims['layer']):
if z_dz[samp_i,k]==0.0:
continue # truly dry
elif tran.eta[samp_i] - tran.z_bot[samp_i,k] < dzmin_surf:
log.debug("[sample %s,k %s] too thin"%(samp_i,k))
k_update.append(k)
else:
# valid layer
for ku in k_update:
for v in update_vars:
tran[v].values[samp_i,ku] = tran[v].values[samp_i,k]
break
def extract_transect(self,xy=None,ll=None,time=slice(None),dx=None,
vars=['uc','vc','Ac','dv','dzz','eta','w']):
"""
xy: [N,2] coordinates defining the line of the transect
time: if an integer or slice of integers, interpret as index
into time dimension. otherwise try to convert to datetime64,
and then index into time coordinate.
dx: omit to use xy as is, or a length scale for resampling xy
returns xr.Dataset, unless xy does not intersect the grid at all,
in which case None is returned.
Simple chaining is allowed, but if time spans two runs, the later
run will be used.
"""
if xy is None:
xy=self.ll_to_native(ll)
if dx is not None:
xy=linestring_utils.upsample_linearring(xy,dx,closed_ring=False)
# check for chaining
if np.issubdtype(type(time),np.integer):
# time came in as an index, so no chaining.
pass
else:
# asarray() helps avoid xarray annoyance
dt=np.max(utils.to_dt64(np.asarray(time)))
if dt<self.run_start:
log.info("extract_transect: chain back")
run=self.chain_restarts(count=dt)[0]
if run is not self: # avoid inf. recursion
return run.extract_transect(xy=xy,ll=ll,time=time,dx=dx,
vars=vars)
else:
log.info("extract_transect: chain back just returned self.")
proc_point_cell=np.zeros( [self.num_procs,len(xy)], np.int32)-1
point_datasets=[None]*len(xy)
good_vars=None # set on-demand below
merged=int(self.config['mergeArrays'])>0
def gen_sources(): # iterator over proc,sub_g,map_fn
if merged:
map_fn=self.map_outputs()[0]
g=unstructured_grid.UnstructuredGrid.from_ugrid(map_fn)
yield [0,g,map_fn]
else:
for proc in range(self.num_procs):
yield proc,self.subdomain_grid(proc),self.map_outputs()[proc]
def time_to_isel(ds,times,mode='nearest'):
"""
return an argument suitable for isel, to pull one or more time steps
from ds.
ds: dataset with time dimension
times: integer, datetime64, or slice thereof.
mode: 'nearest' map a time to the nearest matching time
'before' map a time to the matching or preceding time step
'after' map a time to the following time step.
"""
if isinstance(times,slice):
return slice(time_to_isel(ds,times.start,mode='before'),
time_to_isel(ds,times.stop,mode='after'))
else:
if np.issubdtype(type(times),np.integer):
# already an index
return times
else:
dns=utils.to_dnum(ds.time.values)
dn=utils.to_dnum(times)
if mode=='nearest':
return utils.nearest(dns,dn)
elif mode=='before':
return np.searchsorted(dns,dn)
elif mode=='after':
return np.searchsorted(dns,dn,side='right')
else:
raise Exception("Bad mode: %s"%mode)
for proc,sub_g,map_fn in gen_sources():
ds=None
for pnti,pnt in enumerate(xy):
if point_datasets[pnti] is not None:
continue
c=sub_g.select_cells_nearest(pnt,inside=True)
if c is not None:
proc_point_cell[proc,pnti]=c
if ds is None:
ds=xr.open_dataset(map_fn)
# doctor up the Nk dimensions
ds['Nkf']=ds['Nk'] # copy the variable
del ds['Nk'] # delete old variable, leaving Nk as just a dimension
if good_vars is None:
# drop any variables that don't appear in the output
good_vars=[v for v in vars if v in ds]
time_idx=time_to_isel(ds,time)
point_ds=ds[good_vars].isel(time=time_idx,Nc=c)
point_ds['x_sample']=pnt[0]
point_ds['y_sample']=pnt[1]
point_datasets[pnti]=point_ds
# drop xy points that didn't hit a cell
point_datasets=[p for p in point_datasets if p is not None]
if len(point_datasets)==0: # transect doesn't intersect grid at all.
log.debug("Transect points do not intersect model")
return None
transect=xr.concat(point_datasets,dim='sample')
renames=dict(Nk='layer',Nkw='interface',
uc='Ve',vc='Vn',w='Vu_int')
renames={x:renames[x] for x in renames if (x in transect) or (x in transect.dims)}
transect=transect.rename(**renames)
transect['U']=('sample','layer','xy'),np.concatenate( [transect.Ve.values[...,None],
transect.Vn.values[...,None]],
axis=-1)
if 'Vu_int' in transect:
Vu_int=transect.Vu_int.values.copy()
Vu_int[np.isnan(Vu_int)]=0.0
transect['Vu']=('sample','layer'), 0.5*(Vu_int[:,1:] + Vu_int[:,:-1])
# construct layer-center depths
if 'dzz' not in transect:
# fabricate a dzz
eta_2d,dv_2d,z_w_2d=xr.broadcast( transect['eta'], transect['dv'], -ds['z_w'])
z_w_2d=z_w_2d.clip(-dv_2d,eta_2d)
z_bot=z_w_2d.isel(Nkw=slice(1,None))
z_top=z_w_2d.isel(Nkw=slice(None,-1))
# must use values to avoid xarray getting smart with aligning axes.
dzz=z_top.values-z_bot.values
z_ctr=0.5*(z_bot.values+z_top.values)
z_ctr[dzz==0.0]=np.nan
else:
dzz=transect.dzz.values.copy() # sample, Nk
z_bot=transect['eta'].values[:,None] - dzz.cumsum(axis=1)
z_top=z_bot+dzz
z_ctr=0.5*(z_top+z_bot)
z_ctr[dzz==0.0]=np.nan # indicate no data
transect['z_ctr']=('sample','layer'), z_ctr
transect['z_top']=('sample','layer'), z_top
transect['z_bot']=('sample','layer'), z_bot
# first, the interior interfaces
def choose_valid(a,b):
return np.where(np.isfinite(a),a,b)
z_int=choose_valid(z_top[:,1:],z_bot[:,:-1])
# and no choice of where the first and last rows come from
z_int=np.concatenate( [z_top[:,:1],
z_int,
z_bot[:,-1:]],
axis=1)
transect['z_int']=('sample','interface'),z_int
# we've got dzz, so go ahead and use it, but honor xr_transect
# sign convention that z_dz ~ diff(z_int)
transect['z_dz']=('sample','layer'),-dzz
# helps with plotting
transect.attrs['source']=self.run_dir
return transect
warn_initial_water_level=0
def initial_water_level(self):
"""
some BC methods which want a depth need an estimate of the water surface
elevation, and the initial water level is as good a guess as any.
"""
if self.ic_ds is not None:
return float(self.ic_ds.eta.mean())
else:
if self.warn_initial_water_level==0:
log.warning("Request for initial water level, but no IC is set yet")
self.warn_initial_water_level+=1
return 0.0
def extract_station_monitor(self,xy=None,ll=None,chain_count=1,
dv_from_map=False,data_vars=None):
"""
Return a dataset for a single point in the model
xy: native model coordinates, [Nstation,2]
ll: lon/lat coordinates, [Nstation,2]
chain_count: max number of restarts to go back.
1 => no chaining, just this model. None or 0 =>
chain back through all available runs. Otherwise go
back at most chain_count runs.
if chain_count is a np.datetime64, go back enough restarts to
get to that date (see chain_restarts())
if chain_count is a tuple of datetime64, only consider restarts covering
that period.
This version pulls output from history files
if dv_from_map is True, additionally pulls dv from map output.
if no data matches the time range of chain_count, or profile output
wasn't enable, returns None.
"""
if xy is None:
xy=self.ll_to_native(ll)
if chain_count!=1:
restarts=self.chain_restarts(count=chain_count,load_grid=False)
# dv should be constant, so only load it on self.
dss=[mod.extract_station_monitor(xy=xy,ll=ll,chain_count=1,
data_vars=data_vars,dv_from_map=False)
for mod in restarts]
if len(dss)==0:
return None
chained=xr.concat(dss,dim='time',data_vars='minimal')
if dv_from_map:
# just to get dv...
me=self.extract_station_monitor(xy=xy,ll=ll,chain_count=1,
dv_from_map=True)
chained['dv']=me.dv
return chained
mon=self.monitor_output(dv_from_map=dv_from_map)
if mon is None:
return None # maybe profile output wasn't enabled.
xy=np.asarray(xy)
orig_ndim=xy.ndim
if orig_ndim==1:
xy=xy[None,:]
elif orig_ndim>2:
raise Exception("Can only handle single coordinates or an list of coordinates")
num_stations=len(xy)
stations=[]
for stn in range(num_stations):
dists=utils.dist(xy[stn,:],mon.prof_xy.values)
best=np.argmin(dists)
station=mon.isel(profile=best)
if data_vars is not None:
for v in list(station.data_vars):
if v not in data_vars:
del station[v]
station['distance_from_target']=(),dists[best]
station['profile_index']=best
station['source']='monitor'
stations.append(station)
if orig_ndim==1:
# This used to be done after the fact -- just isel(station=0)
# but concatenation in xarray is super slow
combined_ds=stations[0]
combined_ds['station_x']=(), xy[0,0]
combined_ds['station_y']=(), xy[0,1]
else:
combined_ds=xr.concat(stations,dim='station')
combined_ds['station_x']=('station',), xy[...,0]
combined_ds['station_y']=('station',), xy[...,1]
return combined_ds
def extract_station(self,xy=None,ll=None,chain_count=1,source='auto',dv_from_map=False,
data_vars=None):
"""
See extract_station_map, extract_station_monitor for details.
Will try monitor output if it exists, otherwise map output.
source: 'auto' (default), 'map' or 'monitor' to force a choice.
If a specific source is chosen and doesn't exist, returns None
"""
if source in ['auto','monitor']:
ds=self.extract_station_monitor(xy=xy,ll=ll,chain_count=chain_count,
dv_from_map=dv_from_map,data_vars=data_vars)
if (ds is not None) or (source=='monitor'):
return ds
if source in ['auto','map']:
return self.extract_station_map(xy=xy,ll=ll,chain_count=chain_count,
data_vars=data_vars)
assert False,"How did we get here"
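# Usage sketch (lon/lat is hypothetical): monitor output preferred, falling
# back to map output, chaining through up to 5 restarts:
#   ds=model.extract_station(ll=[-122.3,37.9],chain_count=5,
#                            data_vars=['eta','salt'])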
def extract_station_map(self,xy=None,ll=None,chain_count=1,data_vars=None):
"""
Return a dataset for a single point in the model
xy: native model coordinates, [Nstation,2]
ll: lon/lat coordinates, [Nstation,2]
chain_count: max number of restarts to go back.
1 => no chaining, just this model. None or 0 =>
chain back through all available runs. Otherwise go
back at most chain_count runs.
data_vars: list of variables to include, otherwise all.
This version pulls output from map files
"""
if xy is None:
xy=self.ll_to_native(ll)
map_fns=self.map_outputs()
# First, map request locations to processor and cell
xy=np.asarray(xy)
orig_ndim=xy.ndim
if orig_ndim==1:
xy=xy[None,:]
elif orig_ndim>2:
raise Exception("Can only handle single coordinates or an list of coordinates")
num_stations=len(xy)
# allocate, [proc,cell,distance] per point
matches=[[None,None,np.inf] for i in range(num_stations)]
# outer loop on proc
for proc,map_fn in enumerate(map_fns):
map_ds=xr.open_dataset(map_fn)
g=unstructured_grid.UnstructuredGrid.from_ugrid(map_ds)
cc=g.cells_center()
# inner loop on station
for station in range(num_stations):
c=g.select_cells_nearest(xy[station],inside=False)
d=utils.dist(cc[c],xy[station])
if d<matches[station][2]:
matches[station]=[proc,c,d]
# Now we know exactly which procs are useful, and can close
# the others
hot_procs={} # dictionary tracking which processors are useful
for station,(proc,c,d) in enumerate(matches):
hot_procs[proc]=(station,c,d)
for proc,map_fn in enumerate(map_fns):
if proc not in hot_procs:
xr.open_dataset(map_fn).close()
# otherwise close later
if chain_count==1:
runs=[self]
else:
runs=self.chain_restarts(count=chain_count)
dss=[] # per-restart datasets
# workaround for cases where numsides was not held constant
max_numsides=0
min_numsides=1000000
for run in runs:
model_out=None
for proc,map_fn in enumerate(run.map_outputs()):
if proc not in hot_procs: continue # doesn't have any hits
map_ds=xr.open_dataset(run.map_outputs()[proc])
# Work around bad naming of dimensions
map_ds['Nk_c']=map_ds['Nk']
del map_ds['Nk']
# wait until we've loaded one to initialize the dataset for this run
if model_out is None:
model_out=xr.Dataset() # not middle out
model_out['time']=map_ds.time
# allocate output variables:
for d in map_ds.data_vars:
if data_vars and d not in data_vars:
log.debug('Skipping variable %s'%d)
continue
if 'Nc' in map_ds[d].dims:
# put station first
new_dims=['station']
new_shape=[num_stations]
for d_dim in map_ds[d].dims:
if d_dim=='Nc':
continue # replaced by station above
else:
new_dims.append(d_dim)
new_shape.append(map_ds.dims[d_dim])
model_out[d]=tuple(new_dims), np.zeros(new_shape, map_ds[d].dtype )
# For vectorized indexing, pulls the stations we want from this
# processor, but only gets as far as ordering them densely on this
# proc
Nc_indexer=xr.DataArray( [m[1] for m in matches if m[0]==proc ],
dims=['proc_station'] )
assert len(Nc_indexer),"Somehow this proc has no hits"
# and the station indexes in model_out to assign to.
# this can't use vectorized indexing because you can't assign to the
# result of vectorized indexing.
proc_stations=np.array( [i for i,m in enumerate(matches) if m[0]==proc] )
for d in map_ds.data_vars:
if d not in model_out: continue
# potentially gets one isel out of the tight loop
# this appears to work.
extracted=map_ds[d].isel(Nc=Nc_indexer)
# extracted will have 'station' in the wrong place. transpose
dims=['proc_station'] + [d for d in extracted.dims if d!='proc_station']
extractedT=extracted.transpose(*dims)
# this line is 90% of the time:
ext_vals=extractedT.values
model_out[d].values[proc_stations,...] = ext_vals
#for station in lin_idx,arr_idx in enumerate(np.ndindex(stn_shape)):
# if matches[lin_idx][0]!=proc: continue
#
# extracted=map_ds[d].isel(Nc=matches[lin_idx][1])
# # seems like voodoo -- construct an index into the output,
# # which will let us point to the desired station.
# sel=dict(zip(stn_dims,arr_idx))
# model_out[d].isel(sel).values[...]=extracted
if dss:
# limit to non-overlapping
time_sel=model_out.time.values>dss[-1].time.values[-1]
model_out=model_out.isel(time=time_sel)
if 'numsides' in model_out.dims:
max_numsides=max(max_numsides,model_out.dims['numsides'])
min_numsides=min(min_numsides,model_out.dims['numsides'])
dss.append(model_out)
if 'numsides' in model_out.dims:
if max_numsides!=min_numsides:
log.warning("numsides varies %d to %d over restarts. Kludge around"%
(min_numsides,max_numsides))
dss=[ ds.isel(numsides=slice(0,min_numsides)) for ds in dss]
combined_ds=xr.concat(dss,dim='time',data_vars='minimal',coords='minimal')
# copy from matches
combined_ds['distance_from_target']=('station',), np.zeros(num_stations, np.float64)
combined_ds['subdomain']=('station',), np.zeros(num_stations,np.int32)
combined_ds['station_cell']=('station',), np.zeros(num_stations,np.int32)
combined_ds['station_cell'].attrs['description']="Cell index in subdomain grid"
combined_ds['station_x']=('station',), xy[...,0]
combined_ds['station_y']=('station',), xy[...,1]
combined_ds['source']=('station',), ["map"]*num_stations
for station in range(num_stations):
# here we know the order and can go straight to values
combined_ds['distance_from_target'].values[station]=matches[station][2]
combined_ds['subdomain'].values[station]=matches[station][0]
if orig_ndim==1:
combined_ds=combined_ds.isel(station=0)
return combined_ds
| mit |
vortex-ape/scikit-learn | sklearn/cluster/optics_.py | 2 | 34019 | # -*- coding: utf-8 -*-
"""Ordering Points To Identify the Clustering Structure (OPTICS)
These routines execute the OPTICS algorithm, and implement various
cluster extraction methods of the ordered list.
Authors: Shane Grigsby <refuge@rocktalus.com>
Amy X. Zhang <axz@mit.edu>
License: BSD 3 clause
"""
from __future__ import division
import warnings
import numpy as np
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ._optics_inner import quick_scan
def optics(X, min_samples=5, max_eps=np.inf, metric='euclidean',
p=2, metric_params=None, maxima_ratio=.75,
rejection_ratio=.7, similarity_threshold=0.4,
significant_min=.003, min_cluster_size=.005,
min_maxima_ratio=0.001, algorithm='ball_tree',
leaf_size=30, n_jobs=None):
"""Perform OPTICS clustering from vector array
OPTICS: Ordering Points To Identify the Clustering Structure
Closely related to DBSCAN, finds core samples of high density and expands
clusters from them. Unlike DBSCAN, keeps cluster hierarchy for a variable
neighborhood radius. Better suited for usage on large point datasets than
the current sklearn implementation of DBSCAN.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : array, shape (n_samples, n_features)
The data.
min_samples : int (default=5)
The number of samples in a neighborhood for a point to be considered
as a core point.
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for them to be considered
as in the same neighborhood. Default value of "np.inf" will identify
clusters across all scales; reducing `max_eps` will result in
shorter run times.
metric : string or callable, optional (default='euclidean')
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
maxima_ratio : float, optional (default=.75)
The maximum ratio we allow of average height of clusters on the
right and left to the local maxima in question. The higher the
ratio, the more generous the algorithm is to preserving local
minima, and the more cuts the resulting tree will have.
rejection_ratio : float, optional (default=.7)
Adjusts the fitness of the clustering. When the maxima_ratio is
exceeded, determine which of the clusters to the left and right to
reject based on rejection_ratio. Higher values will result in points
being more readily classified as noise; conversely, lower values will
result in more points being clustered.
similarity_threshold : float, optional (default=.4)
Used to check if nodes can be moved up one level, that is, if the
new cluster created is too "similar" to its parent, given the
similarity threshold. Similarity can be determined by 1) the size
of the new cluster relative to the size of the parent node or
2) the average of the reachability values of the new cluster
relative to the average of the reachability values of the parent
node. A lower value for the similarity threshold means fewer levels
in the tree.
significant_min : float, optional (default=.003)
Sets a lower threshold on how small a significant maximum can be.
min_cluster_size : int > 1 or float between 0 and 1 (default=0.005)
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded
to be at least 2).
min_maxima_ratio : float, optional (default=.001)
Used to determine neighborhood size for minimum cluster membership.
Each local maximum should be the largest value in a neighborhood
of size `min_maxima_ratio * len(X)`, to the left and to the right.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree` (default)
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
See also
--------
OPTICS
An estimator interface for this clustering algorithm.
dbscan
A similar clustering for a specified neighborhood radius (eps).
Our implementation is optimized for runtime.
References
----------
Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, and Jörg Sander.
"OPTICS: ordering points to identify the clustering structure." ACM SIGMOD
Record 28, no. 2 (1999): 49-60.
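Examples
--------
A minimal sketch on toy data (hedged: labels depend on the sample, so
the doctest output is skipped):
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 10])
>>> core, labels = optics(X, min_samples=5)  # doctest: +SKIP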
"""
clust = OPTICS(min_samples, max_eps, metric, p, metric_params,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min,
min_cluster_size, min_maxima_ratio,
algorithm, leaf_size, n_jobs)
clust.fit(X)
return clust.core_sample_indices_, clust.labels_
class OPTICS(BaseEstimator, ClusterMixin):
"""Estimate clustering structure from vector array
OPTICS: Ordering Points To Identify the Clustering Structure
Closely related to DBSCAN, finds core samples of high density and expands
clusters from them. Unlike DBSCAN, keeps cluster hierarchy for a variable
neighborhood radius. Better suited for usage on large point datasets than
the current sklearn implementation of DBSCAN.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
min_samples : int (default=5)
The number of samples in a neighborhood for a point to be considered
as a core point.
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for them to be considered
as in the same neighborhood. Default value of "np.inf" will identify
clusters across all scales; reducing `max_eps` will result in
shorter run times.
metric : string or callable, optional (default='euclidean')
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
maxima_ratio : float, optional (default=.75)
The maximum ratio we allow of average height of clusters on the
right and left to the local maxima in question. The higher the
ratio, the more generous the algorithm is to preserving local
minima, and the more cuts the resulting tree will have.
rejection_ratio : float, optional (default=.7)
Adjusts the fitness of the clustering. When the maxima_ratio is
exceeded, determine which of the clusters to the left and right to
reject based on rejection_ratio. Higher values will result in points
being more readily classified as noise; conversely, lower values will
result in more points being clustered.
similarity_threshold : float, optional (default=.4)
Used to check if nodes can be moved up one level, that is, if the
new cluster created is too "similar" to its parent, given the
similarity threshold. Similarity can be determined by 1) the size
of the new cluster relative to the size of the parent node or
2) the average of the reachability values of the new cluster
relative to the average of the reachability values of the parent
node. A lower value for the similarity threshold means fewer levels
in the tree.
significant_min : float, optional (default=.003)
Sets a lower threshold on how small a significant maximum can be.
min_cluster_size : int > 1 or float between 0 and 1 (default=0.005)
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded
to be at least 2).
min_maxima_ratio : float, optional (default=.001)
Used to determine neighborhood size for minimum cluster membership.
Each local maximum should be the largest value in a neighborhood
of size `min_maxima_ratio * len(X)`, to the left and to the right.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree` (default)
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
core_sample_indices_ : array, shape (n_core_samples,)
Indices of core samples.
labels_ : array, shape (n_samples,)
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
reachability_ : array, shape (n_samples,)
Reachability distances per sample.
ordering_ : array, shape (n_samples,)
The cluster ordered list of sample indices
core_distances_ : array, shape (n_samples,)
Distance at which each sample becomes a core point.
Points which will never be core have a distance of inf.
See also
--------
DBSCAN
A similar clustering for a specified neighborhood radius (eps).
Our implementation is optimized for runtime.
References
----------
Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel, and Jörg Sander.
"OPTICS: ordering points to identify the clustering structure." ACM SIGMOD
Record 28, no. 2 (1999): 49-60.
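Examples
--------
A hedged usage sketch (toy data; exact labels vary with the sample, so
the doctest output is skipped):
>>> import numpy as np
>>> X = np.vstack([np.random.randn(30, 2), np.random.randn(30, 2) + 10])
>>> clust = OPTICS(min_samples=9).fit(X)  # doctest: +SKIP
>>> clust.labels_  # doctest: +SKIP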
"""
def __init__(self, min_samples=5, max_eps=np.inf, metric='euclidean',
p=2, metric_params=None, maxima_ratio=.75,
rejection_ratio=.7, similarity_threshold=0.4,
significant_min=.003, min_cluster_size=.005,
min_maxima_ratio=0.001, algorithm='ball_tree',
leaf_size=30, n_jobs=None):
self.max_eps = max_eps
self.min_samples = min_samples
self.maxima_ratio = maxima_ratio
self.rejection_ratio = rejection_ratio
self.similarity_threshold = similarity_threshold
self.significant_min = significant_min
self.min_cluster_size = min_cluster_size
self.min_maxima_ratio = min_maxima_ratio
self.algorithm = algorithm
self.metric = metric
self.metric_params = metric_params
self.p = p
self.leaf_size = leaf_size
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform OPTICS clustering
Extracts an ordered list of points and reachability distances, and
performs initial clustering using `max_eps` distance specified at
OPTICS object instantiation.
Parameters
----------
X : array, shape (n_samples, n_features)
The data.
y : ignored
Returns
-------
self : instance of OPTICS
The instance.
"""
X = check_array(X, dtype=float)
n_samples = len(X)
if self.min_samples > n_samples:
raise ValueError("Number of training samples (n_samples=%d) must "
"be greater than min_samples (min_samples=%d) "
"used for clustering." %
(n_samples, self.min_samples))
if self.min_cluster_size <= 0 or (self.min_cluster_size !=
int(self.min_cluster_size)
and self.min_cluster_size > 1):
raise ValueError('min_cluster_size must be a positive integer or '
'a float between 0 and 1. Got %r' %
self.min_cluster_size)
elif self.min_cluster_size > n_samples:
raise ValueError('min_cluster_size must be no greater than the '
'number of samples (%d). Got %d' %
(n_samples, self.min_cluster_size))
# Start all points as 'unprocessed' ##
self.reachability_ = np.empty(n_samples)
self.reachability_.fill(np.inf)
self.core_distances_ = np.empty(n_samples)
self.core_distances_.fill(np.nan)
# Start all points as noise ##
self.labels_ = np.full(n_samples, -1, dtype=int)
nbrs = NearestNeighbors(n_neighbors=self.min_samples,
algorithm=self.algorithm,
leaf_size=self.leaf_size, metric=self.metric,
metric_params=self.metric_params, p=self.p,
n_jobs=self.n_jobs)
nbrs.fit(X)
self.core_distances_[:] = nbrs.kneighbors(X,
self.min_samples)[0][:, -1]
self.ordering_ = self._calculate_optics_order(X, nbrs)
indices_, self.labels_ = _extract_optics(self.ordering_,
self.reachability_,
self.maxima_ratio,
self.rejection_ratio,
self.similarity_threshold,
self.significant_min,
self.min_cluster_size,
self.min_maxima_ratio)
self.core_sample_indices_ = indices_
return self
# OPTICS helper functions
def _calculate_optics_order(self, X, nbrs):
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
ordering_idx = 0
for point in range(X.shape[0]):
if processed[point]:
continue
if self.core_distances_[point] <= self.max_eps:
while not processed[point]:
processed[point] = True
ordering[ordering_idx] = point
ordering_idx += 1
point = self._set_reach_dist(point, processed, X, nbrs)
else: # For very noisy points
ordering[ordering_idx] = point
ordering_idx += 1
processed[point] = True
return ordering
def _set_reach_dist(self, point_index, processed, X, nbrs):
P = X[point_index:point_index + 1]
indices = nbrs.radius_neighbors(P, radius=self.max_eps,
return_distance=False)[0]
# Getting indices of neighbors that have not been processed
unproc = np.compress((~np.take(processed, indices)).ravel(),
indices, axis=0)
# Keep n_jobs = 1 in the following lines...please
if not unproc.size:
# Everything is already processed. Return to main loop
return point_index
if self.metric == 'precomputed':
dists = X[point_index, unproc]
else:
dists = pairwise_distances(P, np.take(X, unproc, axis=0),
self.metric, n_jobs=None).ravel()
rdists = np.maximum(dists, self.core_distances_[point_index])
new_reach = np.minimum(np.take(self.reachability_, unproc), rdists)
self.reachability_[unproc] = new_reach
# Define return order based on reachability distance
return (unproc[quick_scan(np.take(self.reachability_, unproc),
dists)])
def extract_dbscan(self, eps):
"""Performs DBSCAN extraction for an arbitrary epsilon.
Extraction runs in linear time. Note that if the `max_eps` OPTICS
parameter was set to < inf for extracting reachability and ordering
arrays, DBSCAN extractions will be unstable for `eps` values close to
`max_eps`. Setting `eps` < (`max_eps` / 5.0) will guarantee
extraction parity with DBSCAN.
Parameters
----------
eps : float or int, required
DBSCAN `eps` parameter. Must be set to < `max_eps`. Equivalence
with DBSCAN algorithm is achieved if `eps` is < (`max_eps` / 5)
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
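Examples
--------
A hedged sketch, assuming `clust` is an already-fitted OPTICS instance
whose `max_eps` comfortably exceeds the chosen `eps`:
>>> core, labels = clust.extract_dbscan(eps=0.5)  # doctest: +SKIP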
"""
check_is_fitted(self, 'reachability_')
if eps > self.max_eps:
raise ValueError('Specify an epsilon smaller than %s. Got %s.'
% (self.max_eps, eps))
if eps * 5.0 > (self.max_eps * 1.05):
warnings.warn(
"Warning, max_eps (%s) is close to eps (%s): "
"Output may be unstable." % (self.max_eps, eps),
RuntimeWarning, stacklevel=2)
# Stability warning is documented in _extract_dbscan method...
return _extract_dbscan(self.ordering_, self.core_distances_,
self.reachability_, eps)
def _extract_dbscan(ordering, core_distances, reachability, eps):
"""Performs DBSCAN extraction for an arbitrary epsilon (`eps`).
Parameters
----------
ordering : array, shape (n_samples,)
OPTICS ordered point indices (`ordering_`)
core_distances : array, shape (n_samples,)
Distances at which points become core (`core_distances_`)
reachability : array, shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`)
eps : float or int
DBSCAN `eps` parameter
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
"""
n_samples = len(core_distances)
is_core = np.zeros(n_samples, dtype=bool)
labels = np.zeros(n_samples, dtype=int)
far_reach = reachability > eps
near_core = core_distances <= eps
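# A core point (core_distances <= eps) whose reachability exceeds eps
# cannot be attached to the previous cluster, so it starts a new one;
# the cumulative sum of those start flags along the OPTICS ordering
# therefore assigns consecutive cluster ids, and non-core unreachable
# points are re-marked as noise on the next line.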
labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
labels[far_reach & ~near_core] = -1
is_core[near_core] = True
return np.arange(n_samples)[is_core], labels
def _extract_optics(ordering, reachability, maxima_ratio=.75,
rejection_ratio=.7, similarity_threshold=0.4,
significant_min=.003, min_cluster_size=.005,
min_maxima_ratio=0.001):
"""Performs automatic cluster extraction for variable density data.
Parameters
----------
ordering : array, shape (n_samples,)
OPTICS ordered point indices (`ordering_`)
reachability : array, shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`)
maxima_ratio : float, optional
The maximum ratio we allow of average height of clusters on the
right and left to the local maxima in question. The higher the
ratio, the more generous the algorithm is to preserving local
minima, and the more cuts the resulting tree will have.
rejection_ratio : float, optional
Adjusts the fitness of the clustering. When the maxima_ratio is
exceeded, determine which of the clusters to the left and right to
reject based on rejection_ratio. Higher values will result in points
being more readily classified as noise; conversely, lower values will
result in more points being clustered.
similarity_threshold : float, optional
Used to check if nodes can be moved up one level, that is, if the
new cluster created is too "similar" to its parent, given the
similarity threshold. Similarity can be determined by 1) the size
of the new cluster relative to the size of the parent node or
2) the average of the reachability values of the new cluster
relative to the average of the reachability values of the parent
node. A lower value for the similarity threshold means fewer levels
in the tree.
significant_min : float, optional
Sets a lower threshold on how small a significant maximum can be.
min_cluster_size : int > 1 or float between 0 and 1
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded
to be at least 2).
min_maxima_ratio : float, optional
Used to determine neighborhood size for minimum cluster membership.
Returns
-------
core_sample_indices_ : array, shape (n_core_samples,)
The indices of the core samples.
labels_ : array, shape (n_samples,)
The estimated labels.
"""
# Extraction wrapper
reachability = reachability / np.max(reachability[1:])
reachability_plot = reachability[ordering].tolist()
root_node = _automatic_cluster(reachability_plot, ordering,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min,
min_cluster_size, min_maxima_ratio)
leaves = _get_leaves(root_node, [])
# Start cluster id's at 0
clustid = 0
n_samples = len(reachability)
is_core = np.zeros(n_samples, dtype=bool)
labels = np.full(n_samples, -1, dtype=int)
# Start all points as non-core noise
for leaf in leaves:
index = ordering[leaf.start:leaf.end]
labels[index] = clustid
is_core[index] = 1
clustid += 1
return np.arange(n_samples)[is_core], labels
def _automatic_cluster(reachability_plot, ordering,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min,
min_cluster_size, min_maxima_ratio):
"""Converts reachability plot to cluster tree and returns root node.
Parameters
----------
reachability_plot : list, required
Reachability distances ordered by OPTICS ordering index.
"""
min_neighborhood_size = 2
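# a fractional min_cluster_size (<= 1) is interpreted as a fraction of
# the number of samples and converted here to an absolute count of at
# least 2 points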
if min_cluster_size <= 1:
min_cluster_size = max(2, min_cluster_size * len(ordering))
neighborhood_size = int(min_maxima_ratio * len(ordering))
# Again, should this check < min_samples, should the parameter be public?
if neighborhood_size < min_neighborhood_size:
neighborhood_size = min_neighborhood_size
local_maxima_points = _find_local_maxima(reachability_plot,
neighborhood_size)
root_node = _TreeNode(ordering, 0, len(ordering), None)
_cluster_tree(root_node, None, local_maxima_points,
reachability_plot, ordering, min_cluster_size,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
return root_node
class _TreeNode(object):
# automatic cluster helper classes and functions
def __init__(self, points, start, end, parent_node):
self.points = points
self.start = start
self.end = end
self.parent_node = parent_node
self.children = []
self.split_point = -1
def _is_local_maxima(index, reachability_plot, neighborhood_size):
right_idx = slice(index + 1, index + neighborhood_size + 1)
left_idx = slice(max(1, index - neighborhood_size - 1), index)
return (np.all(reachability_plot[index] >= reachability_plot[left_idx]) and
np.all(reachability_plot[index] >= reachability_plot[right_idx]))
def _find_local_maxima(reachability_plot, neighborhood_size):
local_maxima_points = {}
# 1st and last points on Reachability Plot are not taken
# as local maxima points
for i in range(1, len(reachability_plot) - 1):
# if the point is a local maxima on the reachability plot with
# regard to neighborhood_size, insert it into priority queue and
# maxima list
if (reachability_plot[i] > reachability_plot[i - 1] and
reachability_plot[i] >= reachability_plot[i + 1] and
_is_local_maxima(i, np.array(reachability_plot),
neighborhood_size) == 1):
local_maxima_points[i] = reachability_plot[i]
return sorted(local_maxima_points,
key=local_maxima_points.__getitem__, reverse=True)
def _cluster_tree(node, parent_node, local_maxima_points,
reachability_plot, reachability_ordering,
min_cluster_size, maxima_ratio, rejection_ratio,
similarity_threshold, significant_min):
"""Recursively builds cluster tree to hold hierarchical cluster structure
node is a node or the root of the tree in the first call
parent_node is the parent of node, or None if node is the root of the tree
local_maxima_points is list of local maxima points sorted in
descending order of reachability
"""
if len(local_maxima_points) == 0:
return # parent_node is a leaf
# take largest local maximum as possible separation between clusters
s = local_maxima_points[0]
node.split_point = s
local_maxima_points = local_maxima_points[1:]
# create two new nodes and add to list of nodes
node_1 = _TreeNode(reachability_ordering[node.start:s],
node.start, s, node)
node_2 = _TreeNode(reachability_ordering[s + 1:node.end],
s + 1, node.end, node)
local_max_1 = []
local_max_2 = []
for i in local_maxima_points:
if i < s:
local_max_1.append(i)
if i > s:
local_max_2.append(i)
node_list = []
node_list.append((node_1, local_max_1))
node_list.append((node_2, local_max_2))
if reachability_plot[s] < significant_min:
node.split_point = -1
# if split_point is not significant, ignore this split and continue
return
# only check a certain ratio of points in the child
# nodes formed to the left and right of the maxima
# ...should check_ratio be a user settable parameter?
check_ratio = .8
check_value_1 = int(np.round(check_ratio * len(node_1.points)))
check_value_2 = int(np.round(check_ratio * len(node_2.points)))
avg_reach1 = np.mean(reachability_plot[(node_1.end -
check_value_1):node_1.end])
avg_reach2 = np.mean(reachability_plot[node_2.start:(node_2.start
+ check_value_2)])
if ((avg_reach1 / reachability_plot[s]) > maxima_ratio or
(avg_reach2 / reachability_plot[s]) > maxima_ratio):
if (avg_reach1 / reachability_plot[s]) < rejection_ratio:
# reject node 2
node_list.remove((node_2, local_max_2))
if (avg_reach2 / reachability_plot[s]) < rejection_ratio:
# reject node 1
node_list.remove((node_1, local_max_1))
if ((avg_reach1 / reachability_plot[s]) >= rejection_ratio and
(avg_reach2 / reachability_plot[s]) >= rejection_ratio):
# since split_point is not significant,
# ignore this split and continue (reject both child nodes)
node.split_point = -1
_cluster_tree(node, parent_node, local_maxima_points,
reachability_plot, reachability_ordering,
min_cluster_size, maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
return
# remove clusters that are too small
if (len(node_1.points) < min_cluster_size and
node_list.count((node_1, local_max_1)) > 0):
# cluster 1 is too small
node_list.remove((node_1, local_max_1))
if (len(node_2.points) < min_cluster_size and
node_list.count((node_2, local_max_2)) > 0):
# cluster 2 is too small
node_list.remove((node_2, local_max_2))
if not node_list:
# parent_node will be a leaf
node.split_point = -1
return
# Check if nodes can be moved up one level - the new cluster created
# is too "similar" to its parent, given the similarity threshold.
bypass_node = 0
if parent_node is not None:
if ((node.end - node.start) / (parent_node.end - parent_node.start) >
similarity_threshold):
parent_node.children.remove(node)
bypass_node = 1
for nl in node_list:
if bypass_node == 1:
parent_node.children.append(nl[0])
_cluster_tree(nl[0], parent_node, nl[1],
reachability_plot, reachability_ordering,
min_cluster_size, maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
else:
node.children.append(nl[0])
_cluster_tree(nl[0], node, nl[1], reachability_plot,
reachability_ordering, min_cluster_size,
maxima_ratio, rejection_ratio,
similarity_threshold, significant_min)
def _get_leaves(node, arr):
if node is not None:
if node.split_point == -1:
arr.append(node)
for n in node.children:
_get_leaves(n, arr)
return arr
| bsd-3-clause |
xflin/spark | python/pyspark/sql/tests.py | 2 | 131465 | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
_have_pandas = False
try:
import pandas
_have_pandas = True
except ImportError:
# No Pandas, but that's okay, we'll skip those tests
pass
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.tests import QuietTest, ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
_have_arrow = False
try:
import pyarrow
_have_arrow = True
except ImportError:
# No Arrow, but that's okay, we'll skip those tests
pass
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies a timezone as a fixed UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
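# Hedged usage sketch: UTCOffsetTimezone(-8) behaves as a fixed UTC-8
# tzinfo, e.g. datetime.datetime(2017, 1, 1, tzinfo=UTCOffsetTimezone(-8)).
# (The -8 offset is illustrative only.)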
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
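# Hedged sketch of the serialize/deserialize round trip defined above:
#   p = ExamplePoint(1.0, 2.0)
#   udt = ExamplePointUDT()
#   assert udt.deserialize(udt.serialize(p)) == p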
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for PythonOnlyPoint, defined purely in Python.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate a UDT implemented only in Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_multiLine_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initalization(self):
# This is separate of UDFInitializationTests
# to avoid context initialization
# when udf is called
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertEqual(row[1], 1)
self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
assert sum(diff) == len(a), "sum: %s, len: %s" % (sum(diff), len(a))
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
# The value argument can be implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
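# Equivalent predicate (sketch, assuming between is inclusive on both
# bounds): between(b, c) should behave like (a >= b) & (a <= c).
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter((df.a >= df.b) & (df.a <= df.c)).collect())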
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
self.fail("Should have thrown an exception")
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
self.fail("Should have thrown an exception")
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
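# Valid usages (sketch, assuming the documented one-keyword API): exactly
# one keyword argument is accepted, and the writer is returned for chaining.
df.writeStream.trigger(processingTime='5 seconds')
df.writeStream.trigger(once=True)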
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
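# Contract exercised above (per the streaming API docs): awaitTermination
# with a timeout returns False if the query is still running when the timeout
# elapses, and True once the query has terminated.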
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# Saving this as Parquet has caused issues as well; keep as a regression check.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
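# Inference note: every Python int maps to LongType regardless of magnitude;
# a value outside the signed 64-bit range (e.g. 2**71) still infers as
# LongType, since range checking happens at serialization time, not during
# inference.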
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# classes defined in __main__ are not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year, month, day, hour, minute, second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
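# Summary of the semantics exercised above: thresh=k keeps rows with at least
# k non-null values among the inspected columns, so how='any' corresponds to
# thresh=len(columns) and how='all' to thresh=1; an explicit thresh wins
# over how.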
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
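# Summary: the fill value's type selects the columns it applies to. Ints fill
# int and double columns, floats fill numeric columns, strings fill only
# string columns, and booleans fill only boolean columns; mismatched columns
# in the subset are silently skipped.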
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
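# Note the asymmetry exercised above: AND/OR/XOR are Column methods
# (bitwiseAND/bitwiseOR/bitwiseXOR), while NOT is only exposed as the
# functions.bitwiseNOT() function.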
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset given as a single column-name string (value changes)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset given as a single column-name string (no change)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with a subset: the column in the subset is replaced, while a
# column outside the subset stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace list while value is not given (default to None)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
self.assertTupleEqual(row, (None, 10, 80.0))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different lengths
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if mixed-type replacements are provided
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# a field type mismatch will cause an exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into a row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
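# The strings above use the DDL-style "name: type" syntax; a bare type name
# such as "int" wraps each value into a single-column struct named "value",
# as the simpleString checks confirm.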
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
try:
self.spark.conf.set("spark.sql.crossJoin.enabled", "false")
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
self.spark.conf.set("spark.sql.crossJoin.enabled", "true")
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
finally:
# We should unset this. Otherwise, other tests are affected.
self.spark.conf.unset("spark.sql.crossJoin.enabled")
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite versions <= 4.9 could not serialize BinaryType with Python 3 (SPARK-17808)
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as Python's type. This is necessary because Python's array
# types depend on the C implementation on the machine. Therefore there
# is no machine-independent correspondence between Python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in Python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in Python 4, and "c" is not supported in Python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types varies across implementations, so we need to make
# sure that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# The JVM does not have unsigned types, so we need to be very careful to
# make sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# The keys of _array_type_mappings form a complete list of all supported
# types; types not in _array_type_mappings are considered unsupported.
# `array.typecodes` is not available in Python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())
data = [
(1, "foo", True, 3.0), (2, "foo", True, 5.0),
(3, "bar", False, -1.0), (4, "bar", False, 6.0),
]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore URL and checks whether a Derby directory is
# created by the Hive metastore. If that directory exists, HiveContext
# is using the Hive metastore.
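# Note: Derby with ";create=true" creates the database directory on the
# first connection, which is what makes the existence check below work.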
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
# We can't include this test in SQLTests because it stops the class's
# SparkContext, which would cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
SparkContext._active_spark_context.stop()
def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this option will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Python < 3.3 doesn't support unittest.mock")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(not _have_arrow, "Arrow not installed")
class ArrowTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True)])
cls.data = [("a", 1, 10, 0.2, 2.0),
("b", 2, 20, 0.4, 4.0),
("c", 3, 30, 0.8, 6.0)]
def assertFramesEqual(self, df_with_arrow, df_without):
msg = ("DataFrame from Arrow is not equal" +
("\n\nWith Arrow:\n%s\n%s" % (df_with_arrow, df_with_arrow.dtypes)) +
("\n\nWithout:\n%s\n%s" % (df_without, df_without.dtypes)))
self.assertTrue(df_without.equals(df_with_arrow), msg=msg)
def test_unsupported_datatype(self):
schema = StructType([StructField("dt", DateType(), True)])
df = self.spark.createDataFrame([(datetime.date(1970, 1, 1),)], schema=schema)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: df.toPandas())
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
pdf = df.toPandas()
self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_pandas_round_trip(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
pdf = pd.DataFrame(data=data_dict)
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| apache-2.0 |
dhermes/google-cloud-python | bigquery/google/cloud/bigquery/client.py | 2 | 69036 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from __future__ import absolute_import
try:
from collections import abc as collections_abc
except ImportError: # Python 2.7
import collections as collections_abc
import functools
import gzip
import os
import uuid
import six
from google import resumable_media
from google.resumable_media.requests import MultipartUpload
from google.resumable_media.requests import ResumableUpload
from google.api_core import page_iterator
import google.cloud._helpers
from google.cloud import exceptions
from google.cloud.client import ClientWithProject
from google.cloud.bigquery._helpers import _SCALAR_VALUE_TO_JSON_ROW
from google.cloud.bigquery._helpers import _str_or_none
from google.cloud.bigquery._http import Connection
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetListItem
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery import job
from google.cloud.bigquery.query import _QueryResults
from google.cloud.bigquery.retry import DEFAULT_RETRY
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TableListItem
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.table import RowIterator
from google.cloud.bigquery.table import _TABLE_HAS_NO_SCHEMA
from google.cloud.bigquery.table import _row_from_mapping
_DEFAULT_CHUNKSIZE = 1048576 # 1024 * 1024 B = 1 MB
_MAX_MULTIPART_SIZE = 5 * 1024 * 1024
_DEFAULT_NUM_RETRIES = 6
_BASE_UPLOAD_TEMPLATE = (
u"https://www.googleapis.com/upload/bigquery/v2/projects/"
u"{project}/jobs?uploadType="
)
_MULTIPART_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"multipart"
_RESUMABLE_URL_TEMPLATE = _BASE_UPLOAD_TEMPLATE + u"resumable"
_GENERIC_CONTENT_TYPE = u"*/*"
_READ_LESS_THAN_SIZE = (
"Size {:d} was specified but the file-like object only had " "{:d} bytes remaining."
)
class Project(object):
"""Wrapper for resource describing a BigQuery project.
:type project_id: str
:param project_id: Opaque ID of the project
:type numeric_id: int
:param numeric_id: Numeric ID of the project
:type friendly_name: str
:param friendly_name: Display name of the project
"""
def __init__(self, project_id, numeric_id, friendly_name):
self.project_id = project_id
self.numeric_id = numeric_id
self.friendly_name = friendly_name
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct an instance from a resource dict."""
return cls(resource["id"], resource["numericId"], resource["friendlyName"])
class Client(ClientWithProject):
"""Client to bundle configuration needed for API requests.
Args:
project (str):
Project ID for the project which the client acts on behalf of.
Will be passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
credentials (google.auth.credentials.Credentials):
(Optional) The OAuth2 Credentials to use for this client. If not
passed (and if no ``_http`` object is passed), falls back to the
default inferred from the environment.
_http (requests.Session):
(Optional) HTTP object to make requests. Can be any object that
defines ``request()`` with the same interface as
:meth:`requests.Session.request`. If not passed, an ``_http``
object is created that is bound to the ``credentials`` for the
current object.
This parameter should be considered private, and could change in
the future.
location (str):
(Optional) Default location for jobs / datasets / tables.
default_query_job_config (google.cloud.bigquery.job.QueryJobConfig):
(Optional) Default ``QueryJobConfig``.
Will be merged into job configs passed into the ``query`` method.
Raises:
google.auth.exceptions.DefaultCredentialsError:
Raised if ``credentials`` is not specified and the library fails
to acquire default credentials.
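Example:
A minimal sketch, assuming default credentials are available in
the environment:
>>> from google.cloud import bigquery
>>> client = bigquery.Client(location='US')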
"""
SCOPE = (
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
)
"""The scopes required for authenticating as a BigQuery consumer."""
def __init__(
self,
project=None,
credentials=None,
_http=None,
location=None,
default_query_job_config=None,
):
super(Client, self).__init__(
project=project, credentials=credentials, _http=_http
)
self._connection = Connection(self)
self._location = location
self._default_query_job_config = default_query_job_config
@property
def location(self):
"""Default location for jobs / datasets / tables."""
return self._location
def get_service_account_email(self, project=None):
"""Get the email address of the project's BigQuery service account
Note:
This is the service account that BigQuery uses to manage tables
encrypted by a key in KMS.
Args:
project (str, optional):
Project ID to use for retrieving the service account email.
Defaults to the client's project.
Returns:
str: service account email address
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> client.get_service_account_email()
my_service_account@my-project.iam.gserviceaccount.com
"""
if project is None:
project = self.project
path = "/projects/%s/serviceAccount" % (project,)
api_response = self._connection.api_request(method="GET", path=path)
return api_response["email"]
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return.
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
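Example:
A minimal sketch, assuming a ``client`` built as ``bigquery.Client()``:
>>> for project in client.list_projects(max_results=10):
...     print(project.project_id)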
"""
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
)
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
Optional. Project ID to use for retrieving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`
associated with the project.
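Example:
A minimal sketch, assuming a ``client`` built as ``bigquery.Client()``:
>>> for dataset in client.list_datasets(include_all=True):
...     print(dataset.dataset_id)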
"""
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def dataset(self, dataset_id, project=None):
"""Construct a reference to a dataset.
:type dataset_id: str
:param dataset_id: ID of the dataset.
:type project: str
:param project: (Optional) project ID for the dataset (defaults to
the project of the client).
:rtype: :class:`google.cloud.bigquery.dataset.DatasetReference`
:returns: a new ``DatasetReference`` instance
"""
if project is None:
project = self.project
return DatasetReference(project, dataset_id)
def create_dataset(self, dataset):
"""API call: create the dataset via a POST request.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.dataset.Dataset` to create.
If ``dataset`` is a reference, an empty dataset is created
with the specified ID and client's default location.
Returns:
google.cloud.bigquery.dataset.Dataset:
A new ``Dataset`` returned from the API.
Example:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> dataset = bigquery.Dataset(client.dataset('my_dataset'))
>>> dataset = client.create_dataset(dataset)
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if isinstance(dataset, DatasetReference):
dataset = Dataset(dataset)
path = "/projects/%s/datasets" % (dataset.project,)
data = dataset.to_api_repr()
if data.get("location") is None and self.location is not None:
data["location"] = self.location
api_response = self._connection.api_request(method="POST", path=path, data=data)
return Dataset.from_api_repr(api_response)
def create_table(self, table):
"""API call: create a table via a PUT request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/insert
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A :class:`~google.cloud.bigquery.table.Table` to create.
If ``table`` is a reference, an empty table is created
with the specified ID. The dataset that the table belongs to
must already exist.
Returns:
google.cloud.bigquery.table.Table:
A new ``Table`` returned from the service.
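Example:
A minimal sketch, assuming a ``client`` built as ``bigquery.Client()``
and an existing dataset ``my_dataset``:
>>> from google.cloud import bigquery
>>> table_ref = client.dataset('my_dataset').table('my_table')
>>> schema = [bigquery.SchemaField('full_name', 'STRING')]
>>> table = client.create_table(bigquery.Table(table_ref, schema=schema))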
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if isinstance(table, TableReference):
table = Table(table)
path = "/projects/%s/datasets/%s/tables" % (table.project, table.dataset_id)
api_response = self._connection.api_request(
method="POST", path=path, data=table.to_api_repr()
)
return Table.from_api_repr(api_response)
def _call_api(self, retry, **kwargs):
call = functools.partial(self._connection.api_request, **kwargs)
if retry:
call = retry(call)
return call()
def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY):
"""Fetch the dataset referenced by ``dataset_ref``
Args:
dataset_ref (Union[ \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
dataset reference from a string using
:func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
A ``Dataset`` instance.
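Example:
A minimal sketch; a string of the form ``'project.dataset_id'``
is also accepted:
>>> dataset = client.get_dataset('my-project.my_dataset')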
"""
if isinstance(dataset_ref, str):
dataset_ref = DatasetReference.from_string(
dataset_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=dataset_ref.path)
return Dataset.from_api_repr(api_response)
def get_table(self, table_ref, retry=DEFAULT_RETRY):
"""Fetch the table referenced by ``table_ref``.
Args:
table_ref (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to fetch from the BigQuery API.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.Table:
A ``Table`` instance.
"""
if isinstance(table_ref, str):
table_ref = TableReference.from_string(
table_ref, default_project=self.project
)
api_response = self._call_api(retry, method="GET", path=table_ref.path)
return Table.from_api_repr(api_response)
def update_dataset(self, dataset, fields, retry=DEFAULT_RETRY):
"""Change some fields of a dataset.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None`` in
``dataset``, it will be deleted.
If ``dataset.etag`` is not ``None``, the update will only
succeed if the dataset on the server has the same ETag. Thus
reading a dataset with ``get_dataset``, changing its fields,
and then passing it to ``update_dataset`` will ensure that the changes
will only be saved if no modifications to the dataset occurred
since the read.
Args:
dataset (google.cloud.bigquery.dataset.Dataset):
The dataset to update.
fields (Sequence[str]):
The properties of ``dataset`` to change (e.g. "friendly_name").
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
Returns:
google.cloud.bigquery.dataset.Dataset:
The modified ``Dataset`` instance.
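Example:
A minimal sketch following the read-modify-write pattern above:
>>> dataset = client.get_dataset('my-project.my_dataset')
>>> dataset.description = 'Nightly ETL output'
>>> dataset = client.update_dataset(dataset, ['description'])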
"""
partial = dataset._build_resource(fields)
if dataset.etag is not None:
headers = {"If-Match": dataset.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=dataset.path, data=partial, headers=headers
)
return Dataset.from_api_repr(api_response)
def update_table(self, table, fields, retry=DEFAULT_RETRY):
"""Change some fields of a table.
Use ``fields`` to specify which fields to update. At least one field
must be provided. If a field is listed in ``fields`` and is ``None``
in ``table``, it will be deleted.
If ``table.etag`` is not ``None``, the update will only succeed if
the table on the server has the same ETag. Thus reading a table with
``get_table``, changing its fields, and then passing it to
``update_table`` will ensure that the changes will only be saved if
no modifications to the table occurred since the read.
Args:
table (google.cloud.bigquery.table.Table): The table to update.
fields (Sequence[str]):
The fields of ``table`` to change, spelled as the Table
properties (e.g. "friendly_name").
retry (google.api_core.retry.Retry):
(Optional) A description of how to retry the API call.
Returns:
google.cloud.bigquery.table.Table:
The table resource returned from the API call.
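Example:
A minimal sketch mirroring ``update_dataset``:
>>> table = client.get_table('my-project.my_dataset.my_table')
>>> table.friendly_name = 'My table'
>>> table = client.update_table(table, ['friendly_name'])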
"""
partial = table._build_resource(fields)
if table.etag is not None:
headers = {"If-Match": table.etag}
else:
headers = None
api_response = self._call_api(
retry, method="PATCH", path=table.path, data=partial, headers=headers
)
return Table.from_api_repr(api_response)
def list_tables(
self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
"""List tables in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset whose tables to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (int):
(Optional) Maximum number of tables to return. If not passed,
defaults to a value set by the API.
page_token (str):
(Optional) Token representing a cursor into the tables. If
not passed, the API will return the first page of tables. The
token marks the beginning of the iterator to be returned and
the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.table.TableListItem` contained
within the requested dataset.
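Example:
A minimal sketch, assuming an existing dataset ``my_dataset``:
>>> for table_item in client.list_tables('my_dataset'):
...     print(table_item.table_id)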
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset, DatasetReference, or string")
path = "%s/tables" % dataset.path
result = page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_table,
items_key="tables",
page_token=page_token,
max_results=max_results,
)
result.dataset = dataset
return result
def delete_dataset(self, dataset, delete_contents=False, retry=DEFAULT_RETRY):
"""Delete a dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset to delete. If a string is passed
in, this method attempts to create a dataset reference from a
string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
delete_contents (boolean):
(Optional) If True, delete all the tables in the dataset. If
False and the dataset contains tables, the request will fail.
Default is False.
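Example:
A minimal sketch; ``delete_contents=True`` also drops any tables:
>>> client.delete_dataset('my_dataset', delete_contents=True)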
"""
if isinstance(dataset, str):
dataset = DatasetReference.from_string(
dataset, default_project=self.project
)
if not isinstance(dataset, (Dataset, DatasetReference)):
raise TypeError("dataset must be a Dataset or a DatasetReference")
params = {}
if delete_contents:
params["deleteContents"] = "true"
self._call_api(retry, method="DELETE", path=dataset.path, query_params=params)
def delete_table(self, table, retry=DEFAULT_RETRY):
"""Delete a table
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/delete
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
A reference to the table to delete. If a string is passed in,
this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
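Example:
A minimal sketch using a fully-qualified table ID string:
>>> client.delete_table('my-project.my_dataset.my_table')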
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if not isinstance(table, (Table, TableReference)):
raise TypeError("table must be a Table or a TableReference")
self._call_api(retry, method="DELETE", path=table.path)
def _get_query_results(
self, job_id, retry, project=None, timeout_ms=None, location=None
):
"""Get the query results object for a query job.
Arguments:
job_id (str): Name of the query job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
project (str):
(Optional) project ID for the query job (defaults to the
project of the client).
timeout_ms (int):
(Optional) number of milliseconds the API call should
wait for the query to complete before the request times out.
location (str): Location of the query job.
Returns:
google.cloud.bigquery.query._QueryResults:
A new ``_QueryResults`` instance.
"""
extra_params = {"maxResults": 0}
if project is None:
project = self.project
if timeout_ms is not None:
extra_params["timeoutMs"] = timeout_ms
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/queries/{}".format(project, job_id)
# This call is typically made in a polling loop that checks whether the
# job is complete (from QueryJob.done(), called ultimately from
# QueryJob.result()). So we don't need to poll here.
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return _QueryResults.from_api_repr(resource)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`google.cloud.bigquery.job.LoadJob`,
:class:`google.cloud.bigquery.job.CopyJob`,
:class:`google.cloud.bigquery.job.ExtractJob`,
or :class:`google.cloud.bigquery.job.QueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource.get("configuration", {})
if "load" in config:
return job.LoadJob.from_api_repr(resource, self)
elif "copy" in config:
return job.CopyJob.from_api_repr(resource, self)
elif "extract" in config:
return job.ExtractJob.from_api_repr(resource, self)
elif "query" in config:
return job.QueryJob.from_api_repr(resource, self)
return job.UnknownJob.from_api_repr(resource, self)
def get_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Fetch a job for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
(Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}".format(project, job_id)
resource = self._call_api(
retry, method="GET", path=path, query_params=extra_params
)
return self.job_from_resource(resource)
def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY):
"""Attempt to cancel a job from a job ID.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel
Arguments:
job_id (str): Unique job identifier.
Keyword Arguments:
project (str):
(Optional) ID of the project which owns the job (defaults to
the client's project).
location (str): Location where the job was run.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
Union[google.cloud.bigquery.job.LoadJob, \
google.cloud.bigquery.job.CopyJob, \
google.cloud.bigquery.job.ExtractJob, \
google.cloud.bigquery.job.QueryJob]:
Job instance, based on the resource returned by the API.
"""
extra_params = {"projection": "full"}
if project is None:
project = self.project
if location is None:
location = self.location
if location is not None:
extra_params["location"] = location
path = "/projects/{}/jobs/{}/cancel".format(project, job_id)
resource = self._call_api(
retry, method="POST", path=path, query_params=extra_params
)
return self.job_from_resource(resource["job"])
def list_jobs(
self,
project=None,
max_results=None,
page_token=None,
all_users=None,
state_filter=None,
retry=DEFAULT_RETRY,
min_creation_time=None,
max_creation_time=None,
):
"""List jobs for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list
Args:
project (str, optional):
Project ID to use for retrieving jobs. Defaults
to the client's project.
max_results (int, optional):
Maximum number of jobs to return.
page_token (str, optional):
Opaque marker for the next "page" of jobs. If not
passed, the API will return the first page of jobs. The token
marks the beginning of the iterator to be returned and the
value of the ``page_token`` can be accessed at
``next_page_token`` of
:class:`~google.api_core.page_iterator.HTTPIterator`.
all_users (bool, optional):
If true, include jobs owned by all users in the project.
Defaults to :data:`False`.
state_filter (str, optional):
If set, include only jobs matching the given state. One of:
* ``"done"``
* ``"pending"``
* ``"running"``
retry (google.api_core.retry.Retry, optional):
How to retry the RPC.
min_creation_time (datetime.datetime, optional):
Min value for job creation time. If set, only jobs created
after or at this timestamp are returned. If the datetime has
no time zone, UTC is assumed.
max_creation_time (datetime.datetime, optional):
Max value for job creation time. If set, only jobs created
before or at this timestamp are returned. If the datetime has
no time zone, UTC is assumed.
Returns:
google.api_core.page_iterator.Iterator:
Iterable of job instances.
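Example:
A minimal sketch listing recently finished jobs:
>>> for job in client.list_jobs(max_results=10, state_filter='done'):
...     print(job.job_id)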
"""
extra_params = {
"allUsers": all_users,
"stateFilter": state_filter,
"minCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(min_creation_time)
),
"maxCreationTime": _str_or_none(
google.cloud._helpers._millis_from_datetime(max_creation_time)
),
"projection": "full",
}
extra_params = {
param: value for param, value in extra_params.items() if value is not None
}
if project is None:
project = self.project
path = "/projects/%s/jobs" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_job,
items_key="jobs",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
)
def load_table_from_uri(
self,
source_uris,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Starts a job for loading data into a table from CloudStorage.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load
Arguments:
source_uris (Union[str, Sequence[str]]):
URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
destination (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
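Example:
A minimal sketch; the bucket and table names are placeholders:
>>> from google.cloud import bigquery
>>> client = bigquery.Client()
>>> job_config = bigquery.LoadJobConfig()
>>> job_config.source_format = bigquery.SourceFormat.CSV
>>> job_config.skip_leading_rows = 1
>>> load_job = client.load_table_from_uri(
...     'gs://my-bucket/data.csv', 'my_dataset.my_table',
...     job_config=job_config)
>>> load_job.result()  # blocks until the load completes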
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source_uris, six.string_types):
source_uris = [source_uris]
if isinstance(destination, str):
destination = TableReference.from_string(
destination, default_project=self.project
)
load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
load_job._begin(retry=retry)
return load_job
def load_table_from_file(
self,
file_obj,
destination,
rewind=False,
size=None,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
):
"""Upload the contents of this table from a file-like object.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
file_obj (file): A file handle opened in binary mode for reading.
destination (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
rewind (bool):
If True, seek to the beginning of the file handle before
reading the file.
size (int):
The number of bytes to read from the file handle. If size is
``None`` or large, resumable upload will be used. Otherwise,
multipart upload will be used.
num_retries (int): Number of upload retries. Defaults to 6.
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ValueError:
If ``size`` is not passed in and cannot be determined, or if
the ``file_obj`` can be detected to be a file opened in text
mode.
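Example:
A minimal sketch; the file must be opened in binary mode:
>>> with open('data.csv', 'rb') as source_file:
...     job = client.load_table_from_file(
...         source_file, 'my_dataset.my_table')
>>> job.result()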
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
if isinstance(destination, str):
destination = TableReference.from_string(
destination, default_project=self.project
)
job_ref = job._JobReference(job_id, project=project, location=location)
load_job = job.LoadJob(job_ref, None, destination, self, job_config)
job_resource = load_job.to_api_repr()
if rewind:
file_obj.seek(0, os.SEEK_SET)
_check_mode(file_obj)
try:
if size is None or size >= _MAX_MULTIPART_SIZE:
response = self._do_resumable_upload(
file_obj, job_resource, num_retries
)
else:
response = self._do_multipart_upload(
file_obj, job_resource, size, num_retries
)
except resumable_media.InvalidResponse as exc:
raise exceptions.from_http_response(exc.response)
return self.job_from_resource(response.json())
def load_table_from_dataframe(
self,
dataframe,
destination,
num_retries=_DEFAULT_NUM_RETRIES,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
):
"""Upload the contents of a table from a pandas DataFrame.
Similar to :meth:`load_table_from_uri`, this method creates, starts and
returns a :class:`~google.cloud.bigquery.job.LoadJob`.
Arguments:
dataframe (pandas.DataFrame):
A :class:`~pandas.DataFrame` containing the data to load.
destination (google.cloud.bigquery.table.TableReference):
The destination table to use for loading the data. If it is an
existing table, the schema of the :class:`~pandas.DataFrame`
must match the schema of the destination table. If the table
does not yet exist, the schema is inferred from the
:class:`~pandas.DataFrame`.
If a string is passed in, this method attempts to create a
table reference from a string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
num_retries (int, optional): Number of upload retries.
job_id (str, optional): Name of the job.
job_id_prefix (str, optional):
The user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str, optional):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig, optional):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
Raises:
ImportError:
If a usable parquet engine cannot be found. This method
requires :mod:`pyarrow` to be installed.
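Example:
A minimal sketch; requires :mod:`pandas` and :mod:`pyarrow`:
>>> import pandas
>>> dataframe = pandas.DataFrame({'name': ['one', 'two'], 'value': [1, 2]})
>>> job = client.load_table_from_dataframe(
...     dataframe, client.dataset('my_dataset').table('my_table'))
>>> job.result()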
"""
buffer = six.BytesIO()
dataframe.to_parquet(buffer)
if job_config is None:
job_config = job.LoadJobConfig()
job_config.source_format = job.SourceFormat.PARQUET
if location is None:
location = self.location
return self.load_table_from_file(
buffer,
destination,
num_retries=num_retries,
rewind=True,
job_id=job_id,
job_id_prefix=job_id_prefix,
location=location,
project=project,
job_config=job_config,
)
def _do_resumable_upload(self, stream, metadata, num_retries):
"""Perform a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the final chunk
is uploaded.
"""
upload, transport = self._initiate_resumable_upload(
stream, metadata, num_retries
)
while not upload.finished:
response = upload.transmit_next_chunk(transport)
return response
def _initiate_resumable_upload(self, stream, metadata, num_retries):
"""Initiate a resumable upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: tuple
:returns:
Pair of
* The :class:`~google.resumable_media.requests.ResumableUpload`
that was created
* The ``transport`` used to initiate the upload.
"""
chunk_size = _DEFAULT_CHUNKSIZE
transport = self._http
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _RESUMABLE_URL_TEMPLATE.format(project=self.project)
# TODO: modify ResumableUpload to take a retry.Retry object
# that it can use for the initial RPC.
upload = ResumableUpload(upload_url, chunk_size, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
upload.initiate(
transport, stream, metadata, _GENERIC_CONTENT_TYPE, stream_final=False
)
return upload, transport
def _do_multipart_upload(self, stream, metadata, size, num_retries):
"""Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
bytes remaining.
"""
data = stream.read(size)
if len(data) < size:
msg = _READ_LESS_THAN_SIZE.format(size, len(data))
raise ValueError(msg)
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project)
upload = MultipartUpload(upload_url, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE)
return response
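# A minimal sketch of how the two upload paths are chosen, assuming ``client``
# is a Client and the file path is hypothetical. Streams with a known size
# below _MAX_MULTIPART_SIZE take the single-request multipart path; anything
# larger, or of unknown size, takes the chunked resumable path.
def _example_choose_upload_path(client, destination, job_config):
    import os
    size = os.stat("data.csv").st_size
    with open("data.csv", "rb") as file_obj:
        # load_table_from_file routes on ``size`` exactly as shown above
        return client.load_table_from_file(
            file_obj, destination, size=size, job_config=job_config)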
def copy_table(
self,
sources,
destination,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Copy one or more tables to another table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy
Arguments:
sources (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
Sequence[ \
:class:`~google.cloud.bigquery.table.TableReference`], \
]):
Table or tables to be copied.
destination (Union[ \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be copied.
Keyword Arguments:
job_id (str): (Optional) The ID of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of any
source table as well as the destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.CopyJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.CopyJob: A new copy job instance.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(sources, str):
sources = TableReference.from_string(sources, default_project=self.project)
if isinstance(destination, str):
destination = TableReference.from_string(
destination, default_project=self.project
)
if not isinstance(sources, collections_abc.Sequence):
sources = [sources]
copy_job = job.CopyJob(
job_ref, sources, destination, client=self, job_config=job_config
)
copy_job._begin(retry=retry)
return copy_job
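# A minimal usage sketch; ``client`` is assumed to be an authenticated Client
# and the project/dataset/table ids below are hypothetical.
def _example_copy_table(client):
    copy_job = client.copy_table(
        "my-project.my_dataset.source_table",
        "my-project.my_dataset.destination_table",
    )
    copy_job.result()  # wait for the server-side copy to finish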
def extract_table(
self,
source,
destination_uris,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
job_config=None,
retry=DEFAULT_RETRY,
):
"""Start a job to extract a table into Cloud Storage files.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract
Arguments:
source (Union[ \
:class:`google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table to be extracted.
destination_uris (Union[str, Sequence[str]]):
URIs of Cloud Storage file(s) into which table data is to be
extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
Keyword Arguments:
job_id (str): (Optional) The ID of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
source table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.ExtractJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.ExtractJob: A new extract job instance.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
job_ref = job._JobReference(job_id, project=project, location=location)
if isinstance(source, str):
source = TableReference.from_string(source, default_project=self.project)
if isinstance(destination_uris, six.string_types):
destination_uris = [destination_uris]
extract_job = job.ExtractJob(
job_ref, source, destination_uris, client=self, job_config=job_config
)
extract_job._begin(retry=retry)
return extract_job
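# A minimal usage sketch; the bucket and table ids are hypothetical. The
# wildcard in the destination URI lets BigQuery shard large exports across
# multiple files.
def _example_extract_table(client):
    extract_job = client.extract_table(
        "my-project.my_dataset.my_table",
        "gs://my-bucket/exports/my_table-*.csv",
    )
    extract_job.result()  # wait for the extract job to finish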
def query(
self,
query,
job_config=None,
job_id=None,
job_id_prefix=None,
location=None,
project=None,
retry=DEFAULT_RETRY,
):
"""Run a SQL query.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query
Arguments:
query (str):
SQL query to be executed. Defaults to the standard SQL
dialect. Use the ``job_config`` parameter to change dialects.
Keyword Arguments:
job_config (google.cloud.bigquery.job.QueryJobConfig):
(Optional) Extra configuration options for the job.
To override any options that were previously set in
the ``default_query_job_config`` given to the
``Client`` constructor, manually set those options to ``None``,
or whatever value is preferred.
job_id (str): (Optional) ID to use for the query job.
job_id_prefix (str):
(Optional) The prefix to use for a randomly generated job ID.
This parameter will be ignored if a ``job_id`` is also given.
location (str):
Location where to run the job. Must match the location of
any table used in the query as well as the destination table.
project (str):
Project ID of the project where the job runs. Defaults
to the client's project.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.QueryJob: A new query job instance.
"""
job_id = _make_job_id(job_id, job_id_prefix)
if project is None:
project = self.project
if location is None:
location = self.location
if self._default_query_job_config:
if job_config:
# any option that is unset on the incoming config but set on
# the default config is filled in from the default, so the
# incoming config takes precedence
job_config = job_config._fill_from_default(
self._default_query_job_config
)
else:
job_config = self._default_query_job_config
job_ref = job._JobReference(job_id, project=project, location=location)
query_job = job.QueryJob(job_ref, query, client=self, job_config=job_config)
query_job._begin(retry=retry)
return query_job
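# A minimal usage sketch; the dataset and table names are hypothetical.
def _example_query(client):
    query_job = client.query(
        "SELECT name, COUNT(*) AS total "
        "FROM `my_dataset.people` GROUP BY name")
    for row in query_job.result():  # result() waits, then iterates rows
        print(row)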
def insert_rows(self, table, rows, selected_fields=None, **kwargs):
"""Insert rows into a table via the streaming API.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
rows (Union[ \
Sequence[Tuple], \
Sequence[dict], \
]):
Row data to be inserted. If a list of tuples is given, each
tuple should contain data for each schema field on the
current table and in the same order as the schema fields. If
a list of dictionaries is given, the keys must include all
required fields in the schema. Keys which do not correspond
to a field in the schema are ignored.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
kwargs (dict):
Keyword arguments to
:meth:`~google.cloud.bigquery.client.Client.insert_rows_json`.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
Raises:
ValueError: if table's schema is not set
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if selected_fields is not None:
schema = selected_fields
elif isinstance(table, TableReference):
raise ValueError("need selected_fields with TableReference")
elif isinstance(table, Table):
if len(table.schema) == 0:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
schema = table.schema
else:
raise TypeError("table should be Table or TableReference")
json_rows = []
for index, row in enumerate(rows):
if isinstance(row, dict):
row = _row_from_mapping(row, schema)
json_row = {}
for field, value in zip(schema, row):
converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type)
if converter is not None: # STRING doesn't need converting
value = converter(value)
json_row[field.name] = value
json_rows.append(json_row)
return self.insert_rows_json(table, json_rows, **kwargs)
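# A minimal streaming-insert sketch; the table id and fields are hypothetical.
# A Table (with schema) is fetched first, since passing a bare TableReference
# would require ``selected_fields``.
def _example_insert_rows(client):
    table_ref = TableReference.from_string("my-project.my_dataset.people")
    table = client.get_table(table_ref)  # schema is needed for conversion
    errors = client.insert_rows(
        table, [("fred", 32), {"name": "wylma", "age": 30}])
    assert errors == []  # an empty list means every row was accepted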
def insert_rows_json(
self,
table,
json_rows,
row_ids=None,
skip_invalid_rows=None,
ignore_unknown_values=None,
template_suffix=None,
retry=DEFAULT_RETRY,
):
"""Insert rows into a table without applying local type conversions.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The destination table for the row data, or a reference to it.
json_rows (Sequence[dict]):
Row data to be inserted. Keys must match the table schema fields
and values must be JSON-compatible representations.
row_ids (Sequence[str]):
(Optional) Unique ids, one per row being inserted. If omitted,
unique IDs are created.
skip_invalid_rows (bool):
(Optional) Insert all valid rows of a request, even if invalid
rows exist. The default value is False, which causes the entire
request to fail if any invalid rows exist.
ignore_unknown_values (bool):
(Optional) Accept rows that contain values that do not match the
schema. The unknown values are ignored. Default is False, which
treats unknown values as errors.
template_suffix (str):
(Optional) treat ``name`` as a template table and provide a suffix.
BigQuery will create the table ``<name> + <template_suffix>`` based
on the schema of the template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
Sequence[Mappings]:
One mapping per row with insert errors: the "index" key
identifies the row, and the "errors" key contains a list of
the mappings describing one or more problems with the row.
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
rows_info = []
data = {"rows": rows_info}
for index, row in enumerate(json_rows):
info = {"json": row}
if row_ids is not None:
info["insertId"] = row_ids[index]
else:
info["insertId"] = str(uuid.uuid4())
rows_info.append(info)
if skip_invalid_rows is not None:
data["skipInvalidRows"] = skip_invalid_rows
if ignore_unknown_values is not None:
data["ignoreUnknownValues"] = ignore_unknown_values
if template_suffix is not None:
data["templateSuffix"] = template_suffix
# We can always retry, because every row has an insert ID.
response = self._call_api(
retry, method="POST", path="%s/insertAll" % table.path, data=data
)
errors = []
for error in response.get("insertErrors", ()):
errors.append({"index": int(error["index"]), "errors": error["errors"]})
return errors
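# A minimal usage sketch with explicit insert IDs (the table id is
# hypothetical); supplying ``row_ids`` makes retried requests deduplicate
# on the server.
def _example_insert_rows_json(client):
    rows = [{"name": "fred", "age": 32}, {"name": "wylma", "age": 30}]
    errors = client.insert_rows_json(
        "my-project.my_dataset.people", rows, row_ids=["row-0", "row-1"])
    assert errors == []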
def list_partitions(self, table, retry=DEFAULT_RETRY):
"""List the partitions in a table.
Arguments:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The table or reference from which to get partition info
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
List[str]:
A list of the partition ids present in the partitioned table
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
meta_table = self.get_table(
TableReference(
self.dataset(table.dataset_id, project=table.project),
"%s$__PARTITIONS_SUMMARY__" % table.table_id,
)
)
subset = [col for col in meta_table.schema if col.name == "partition_id"]
return [
row[0]
for row in self.list_rows(meta_table, selected_fields=subset, retry=retry)
]
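# A minimal usage sketch for a date-partitioned table (id hypothetical).
def _example_list_partitions(client):
    partition_ids = client.list_partitions("my-project.my_dataset.events")
    # e.g. ['20190101', '20190102', ...]
    return partition_ids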
def list_rows(
self,
table,
selected_fields=None,
max_results=None,
page_token=None,
start_index=None,
page_size=None,
retry=DEFAULT_RETRY,
):
"""List the rows of the table.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/list
.. note::
This method assumes that the provided schema is up-to-date with the
schema as defined on the back-end: if the two schemas are not
identical, the values returned may be incomplete. To ensure that the
local copy of the schema is up-to-date, call ``client.get_table``.
Args:
table (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
The table to list, or a reference to it.
selected_fields (Sequence[ \
:class:`~google.cloud.bigquery.schema.SchemaField` \
]):
The fields to return. Required if ``table`` is a
:class:`~google.cloud.bigquery.table.TableReference`.
max_results (int):
(Optional) maximum number of rows to return.
page_token (str):
(Optional) Token representing a cursor into the table's rows.
If not passed, the API will return the first page of the
rows. The token marks the beginning of the iterator to be
returned and the value of the ``page_token`` can be accessed
at ``next_page_token`` of the
:class:`~google.cloud.bigquery.table.RowIterator`.
start_index (int):
(Optional) The zero-based index of the starting row to read.
page_size (int):
(Optional) The maximum number of items to return per page in
the iterator.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.table.RowIterator:
Iterator of row data
:class:`~google.cloud.bigquery.table.Row`-s. During each
page, the iterator will have the ``total_rows`` attribute
set, which counts the total number of rows **in the table**
(this is distinct from the total number of rows in the
current page: ``iterator.page.num_items``).
"""
if isinstance(table, str):
table = TableReference.from_string(table, default_project=self.project)
if selected_fields is not None:
schema = selected_fields
elif isinstance(table, TableReference):
raise ValueError("need selected_fields with TableReference")
elif isinstance(table, Table):
if len(table.schema) == 0 and table.created is None:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
schema = table.schema
else:
raise TypeError("table should be Table or TableReference")
params = {}
if selected_fields is not None:
params["selectedFields"] = ",".join(field.name for field in selected_fields)
if start_index is not None:
params["startIndex"] = start_index
row_iterator = RowIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="%s/data" % (table.path,),
schema=schema,
page_token=page_token,
max_results=max_results,
page_size=page_size,
extra_params=params,
)
return row_iterator
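# A minimal usage sketch; the table id and field names are hypothetical.
# A Table is fetched first so its schema is available locally.
def _example_list_rows(client):
    table = client.get_table(
        TableReference.from_string("my-project.my_dataset.people"))
    rows = client.list_rows(table, max_results=10)
    print("total rows in table:", rows.total_rows)
    for row in rows:
        print(row["name"], row["age"])  # Row values support mapping access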
# pylint: disable=unused-argument
def _item_to_project(iterator, resource):
"""Convert a JSON project to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a project.
:rtype: :class:`.Project`
:returns: The next project in the page.
"""
return Project.from_api_repr(resource)
# pylint: enable=unused-argument
def _item_to_dataset(iterator, resource):
"""Convert a JSON dataset to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a dataset.
:rtype: :class:`.DatasetListItem`
:returns: The next dataset in the page.
"""
return DatasetListItem(resource)
def _item_to_job(iterator, resource):
"""Convert a JSON job to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a job.
:rtype: job instance.
:returns: The next job in the page.
"""
return iterator.client.job_from_resource(resource)
def _item_to_table(iterator, resource):
"""Convert a JSON table to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: dict
:param resource: An item to be converted to a table.
:rtype: :class:`~google.cloud.bigquery.table.Table`
:returns: The next table in the page.
"""
return TableListItem(resource)
def _make_job_id(job_id, prefix=None):
"""Construct an ID for a new job.
:type job_id: str or ``NoneType``
:param job_id: the user-provided job ID
:type prefix: str or ``NoneType``
:param prefix: (Optional) the user-provided prefix for a job ID
:rtype: str
:returns: A job ID
"""
if job_id is not None:
return job_id
elif prefix is not None:
return str(prefix) + str(uuid.uuid4())
else:
return str(uuid.uuid4())
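# A small sketch of the three cases (values illustrative):
def _example_make_job_id():
    assert _make_job_id("fixed-id") == "fixed-id"  # explicit ID wins
    assert _make_job_id(None, "daily_").startswith("daily_")  # prefix + uuid4
    assert len(_make_job_id(None)) == 36  # bare uuid4 string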
def _check_mode(stream):
"""Check that a stream was opened in read-binary mode.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute
and is not among ``rb``, ``r+b`` or ``rb+``.
"""
mode = getattr(stream, "mode", None)
if isinstance(stream, gzip.GzipFile):
if mode != gzip.READ:
raise ValueError(
"Cannot upload gzip files opened in write mode: use "
"gzip.GzipFile(filename, mode='rb')"
)
else:
if mode is not None and mode not in ("rb", "r+b", "rb+"):
raise ValueError(
"Cannot upload files opened in text mode: use "
"open(filename, mode='rb') or open(filename, mode='r+b')"
)
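# A minimal sketch: open upload sources in binary read mode so the check
# passes (the path is hypothetical).
def _example_check_mode():
    with open("data.csv", mode="rb") as file_obj:
        _check_mode(file_obj)  # "rb" is accepted; "r" would raise ValueError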
def _get_upload_headers(user_agent):
"""Get the headers for an upload request.
:type user_agent: str
:param user_agent: The user-agent for requests.
:rtype: dict
:returns: The headers to be used for the request.
"""
return {
"Accept": "application/json",
"Accept-Encoding": "gzip, deflate",
"User-Agent": user_agent,
"content-type": "application/json",
}
| apache-2.0 |
Arcanewinds/FDL-LunarResources | CraterDetection/Polygon/craterDetector.py | 1 | 47437 | #Written by Timothy Seabrook
#timothy.seabrook@cs.ox.ac.uk
#This whole script takes a bit too long to run and didn't end up being too effective.
#The basic idea is:
#1. Detect edges using a canny filter (This in itself isn't reliable enough)
#2. Group edges into 'shapes' permitting that some gaps may exist
#3. For each shape, use a line-of-fit split-and-merge strategy to form straight lines from pixels
#4. Convert shapes into graphs - lines to nodes and edges
#5. Find cycles in graphs to identify convex shapes
#6. Threshold convex shapes to identify craters
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
import os
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
from scipy.sparse import csr_matrix
from graphCycles import Graph
import split_and_merge as sm
from PIL import Image
import glymur
import gdal
def edgeCluster(edges, max_step):
#edgeCluster algorithm
#Perform a walk from each edge pixel
#max_step determines how far a pixel can be for it
# to be considered part of the same edge
w, h = edges.shape[1], edges.shape[0] #size of search area
labels = np.zeros((h, w), dtype=np.uint32) #uint32 covers 0 to 4,294,967,295
data = np.where(edges)
nextLabel = 0 #Region ID (0 means unlabelled)
checkList = [] #Initialise checklist, contains pixels for neighbourhood traversability checks
num_total = len(data[0]) #Count number of valid unlabelled pixels
num_complete = 0 #Initialise counter
ind = 0
#BEGIN CONNECTED COMPONENTS ALGORITHM
while(num_complete < num_total):
nextLabel += 1 #Increment label class ID
y, x = data[0][ind], data[1][ind]
while(labels[y,x] != 0):
ind += 1
y, x = data[0][ind], data[1][ind]
labels[y,x] = nextLabel #Add next pixel to the new label class
if len(checkList) == 0: #Create a list of pixels for FloodFill neighbour checking
checkList = [[y, x]]
else:
checkList.append([y, x]) #append mutates in place and returns None, so don't reassign
#BEGIN FLOODFILL ALGORITHM
while len(checkList) > 0: #Whilst there are qualifying pixels in this iteration of FloodFill
y, x = checkList.pop() #Take pixel from checklist, to find qualifying neighbours
num_complete += 1 #update count for timer
#BEGIN LOCATION SPECIFIC NEIGHBOUR INDEXING
if x > (max_step-1):
xmin = -max_step
if x < (w - max_step): #middle column
xmax = 1+max_step
else: #rightmost column
xmax = 1+(w-x-1)
else: #leftmost column
xmax = 1+max_step
xmin = -x
if y > (max_step-1):
ymin = -max_step
if y < (h - max_step): #middle row
ymax = 1+max_step
else: #bottom row
ymax = 1+(h-y-1)
else: #top row
ymax = 1+max_step
ymin = -y
#END LOCATION SPECIFIC NEIGHBOUR INDEXING
#BEGIN NEIGHBOUR TRAVERSABILITY CHECK
for i in range(xmin, xmax):
for j in range(ymin, ymax): #for all neighbouring pixels
if (((j == 0) & (i == 0))!=True): #not including current pixel
if(labels[y + j, x + i] == 0):
if edges[y+j,x+i] == True: #and only considering unlabeled pixels
labels[y+j,x+i] = nextLabel
checkList.append([y+j,x+i])
#END NEIGHBOUR TRAVERSABILITY CHECK
#END FLOODFILL ALGORITHM
#seeds = np.where(labels == 0) #Reset candidate seeds
#END CONNECTED COMPONENTS ALGORITHM
cols = np.arange(labels.size)
M = csr_matrix((cols, (labels.ravel(), cols)),
shape=(labels.max() + 1, labels.size))
indices = [np.unravel_index(row.data, labels.shape) for row in M]
counts = np.zeros((np.max(labels)+1))
for i in range(np.max(labels)+1):
counts[i] = indices[i][0].size
return indices, counts
#return labels #return labels and count
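# A minimal usage sketch, assuming ``image`` is a 2-D grayscale numpy array:
def _example_edge_cluster(image):
    edge_map = canny(image, sigma=3, low_threshold=4, high_threshold=50)
    groups, sizes = edgeCluster(edge_map, max_step=3)
    # groups[i] is a (rows, cols) index tuple for group i; sizes[i] is its
    # pixel count. Index 0 corresponds to the unlabelled background.
    return groups, sizes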
#base_folder = "/Volumes/DATA DISK/PDS_FILES/LROC_NAC/m108898482_cdr_w_jp2/"
#base_filename ="m108898482_cdr_jp2"
base_folder = "/Users/seabrook/Documents/FDL/FDL-LunarResources/PDS_FILES/LROC_NAC/"
base_filename = "M1106504662RE"
filename = base_folder+"P26_0-18000.txt"
d = []
with open(filename,'r') as source: #text mode: line.split('\t') expects str, not bytes
for line in source:
fields = line.split('\t')
d.append(fields)
hypothesis = 4
num_nodes = 0
for n in range(len(d)-1):
#base_filename = d[n+1][0]
num_lil_craters = 0
num_craters = 0
num_bigcraters = 0
#curr_filename = filename+str(n+1)+'.jp2'
curr_filename = base_folder+base_filename+'.tif'
ds = gdal.Open(curr_filename)
image = np.array(ds.GetRasterBand(1).ReadAsArray())
#curr_filename = base_folder+base_filename+'_p'+str(n+1)+'.tif'
#if not os.path.isdir(base_folder + 'p' + str(n + 1) + "/"):
# os.mkdir(base_folder + 'p' + str(n + 1) + "/")
# Load picture and detect edges
#image = glymur.Jp2k(curr_filename)[:]
# Low threshold and High threshold represent number of pixels that may be skipped to make a line [4, 60 seems good]
# Sigma represents the width of the guassian smoothing kernel [3 seems good]
edges = canny(image, sigma=3, low_threshold=4, high_threshold=50)
#fig, axarr = plt.subplots(ncols=2, nrows=1, figsize=(10, 4))
#axarr[1].imshow(image, cmap=plt.cm.gray)
#plt.show()
lines, counts = edgeCluster(edges,3)
#segments = np.zeros(len(lines))
segmentParent = np.zeros(len(lines), dtype=int)
#data = np.where(edges)
for i in range(1,len(lines)):
if i == 1:
segments = sm.split_and_merge(lines[i], 1)
segmentParent[i] = len(segments)
else:
segments = np.hstack((segments, sm.split_and_merge(lines[i], 0.5)))
segmentParent[i] = segments.size
#cm = plt.get_cmap('gist_rainbow')
#fig1, axarr = plt.subplots(ncols=2, nrows=1)
#axarr[0].imshow(edges, cmap=plt.cm.gray)
#axarr[1].imshow(image, cmap=plt.cm.gray)
#axarr[1].set_color_cycle([cm(1. * i / 20) for i in range(20)])
#for i in range(1,len(lines)):
# y, x = lines[i]
# axarr[1].scatter(x, y, alpha=0.8, edgecolors='none', s=1)
#fig2, axarr = plt.subplots(ncols=2, nrows=1)
#axarr[0].imshow(image, cmap=plt.cm.gray)
#axarr[1].imshow(image, cmap=plt.cm.gray)
#For every grouped line
nodes = []
for i in range(1,len(lines)):
first = segmentParent[i-1]
last = segmentParent[i]
#For every segment of line
#plt.axes(axarr[0])
for j in range(first,last):
sm.generate_line_ends(segments[j])
# plt.plot([segments[j].start[1], segments[j].end[1]], [segments[j].start[0], segments[j].end[0]], 'r-')
#Hypothesis 1
# proposal: extend all lines by a scalar value to encourage intersection
# result: poor, some lines that already intersect do not need additional reach
# some lines require larger reach still to make important intersections
# conclusion: We require a dynamic value per line, based on context?
#
#Hypothesis 2
# proposal: where two lines can intersect if extended by max((end-mean/2),max_extend)
# they should be
# result: decent, large lines extend too far, most 'easy' craters get captured.
# conclusion: distance between ends of lines is probably better than distance to intersection
#
#If a line can be extended to intersect another, within the bounds of the others data points
#Then it should do so.
#Max extension (in x) permissible for each of two lines to intersect
##############################################################################
if(hypothesis == 2):
max_extend = 5
for j in range(first, last):
for k in range(first,last):
if(j < k):
#Do these lines intersect?
if(segments[j].slope[0] == segments[k].slope[0]):
#They never intersect
intersect = False
else:
#They intersect at [x_cross, y_cross]
#a1x + b1 = a2x + b2
#(a1 - a2)x = (b2 - b1)
#x = (b2-b1)/(a1-a2)
x_cross = np.divide((segments[k].intercept - segments[j].intercept),\
(segments[j].slope[0] - segments[k].slope[0]))
#y = ax + b
y_cross = np.multiply(segments[j].slope[0], x_cross) + segments[j].intercept
#Check that intersection point lies within bounds of map
if((x_cross > 0) & (x_cross < edges.shape[0]) & (y_cross > 0) & (y_cross < edges.shape[1])):
#If x_cross is outside of segment j's maximal bounds
if (x_cross > segments[j].max[0]):
#check that x_cross is close enough to j to warrant intersection
if ((x_cross - segments[j].max[0]) < np.maximum(np.multiply(0.5,(
np.max(segments[j].data[0]) - segments[j].mean[0])),max_extend)):
#If x_cross is outside of segment k's maximal bounds
if (x_cross > segments[k].max[0]):
# check that x_cross is close enough to k to warrant intersection
if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5, (
np.max(segments[k].data[0]) - segments[k].mean[0])), max_extend)):
#If it is, update k(max)
segments[k].max[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].max[1] = y_cross
else:
segments[k].min[1] = y_cross
#update j(max)
segments[j].max[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].max[1] = y_cross
else:
segments[j].min[1] = y_cross
else:
# If x_cross is outside of segment k's minimal bounds
if (x_cross < segments[k].min[0]):
# check that x_cross is close enough to k to warrant intersection
if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, (
segments[k].mean[0] - np.min(segments[k].data[0]))),max_extend)):
# If it is, update k(min)
segments[k].min[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].min[1] = y_cross
else:
segments[k].max[1] = y_cross
#update j(max)
segments[j].max[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].max[1] = y_cross
else:
segments[j].min[1] = y_cross
else: #x_cross is within bounds of k
# update j(max)
segments[j].max[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].max[1] = y_cross
else:
segments[j].min[1] = y_cross
else:
# If x_cross is outside of segment j's minimal bounds
if (x_cross < segments[j].min[0]):
# check that x_cross is close enough to j to warrant intersection
if((segments[j].min[0] - x_cross) < np.maximum(np.multiply(0.5,(
segments[j].mean[0] - np.min(segments[j].data[0]))),max_extend)):
# If x_cross is outside of segment k's maximal bounds
if (x_cross > segments[k].max[0]):
# check that x_cross is close enough to k to warrant intersection
if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5,(
np.max(segments[k].data[0]) - segments[k].mean[0])),max_extend)):
# If it is, update k(max)
segments[k].max[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].max[1] = y_cross
else:
segments[k].min[1] = y_cross
# update j(min)
segments[j].min[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].min[1] = y_cross
else:
segments[j].max[1] = y_cross
else:
# If x_cross is outside of segment k's minimal bounds
if (x_cross < segments[k].min[0]):
# check that x_cross is close enough to k to warrant intersection
if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, (
segments[k].mean[0] - np.min(segments[k].data[0]))), max_extend)):
# If it is, update k(min)
segments[k].min[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].min[1] = y_cross
else:
segments[k].max[1] = y_cross
# update j(min)
segments[j].min[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].min[1] = y_cross
else:
segments[j].max[1] = y_cross
else: #x_cross is within bounds of k
# update j(max)
segments[j].min[0] = x_cross
if segments[j].slope[0] >= 0:
segments[j].min[1] = y_cross
else:
segments[j].max[1] = y_cross
else: #x_cross is within bounds of j
# If x_cross is outside of segment k's maximal bounds
if (x_cross > segments[k].max[0]):
# check that x_cross is close enough to k to warrant intersection
if ((x_cross - segments[k].max[0]) < np.maximum(np.multiply(0.5,
(np.max(segments[k].data[0]) - segments[k].mean[0])), max_extend)):
# If it is, update k(max)
segments[k].max[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].max[1] = y_cross
else:
segments[k].min[1] = y_cross
else:
# If x_cross is outside of segment k's minimal bounds
if (x_cross < segments[k].min[0]):
# check that x_cross is close enough to k to warrant intersection
if ((segments[k].min[0] - x_cross) < np.maximum(np.multiply(0.5, (
segments[k].mean[0] - np.min(segments[k].data[0]))), max_extend)):
# If it is, update k(min)
segments[k].min[0] = x_cross
if (segments[k].slope[0] >= 0):
segments[k].min[1] = y_cross
else:
segments[k].max[1] = y_cross
#else: # x_cross is within bounds of k
##############################################################################
# Hypothesis 3
# proposal: Connecting the ends of lines will provide more sensible connections
# than connecting intersections
# result: Compact groups, lots of unnecessary crossing lines.
# conclusion: Most lines only need to connect once at each end
if(hypothesis == 3):
max_extend = 6
changeFlag = True
connected = np.zeros((last - first, last - first), dtype=bool)
while(changeFlag):
changeFlag = False
for j in range(first, last):
for k in range(first,last):
if(j < k):
if(connected[j-first,k-first] == False):
#First, do these lines already intersect?
if (segments[j].slope[0] == segments[k].slope[0]):
# They never intersect
intersect = False
else:
x_cross = np.divide((segments[k].intercept[0] - segments[j].intercept[0]),
(segments[j].slope[0] - segments[k].slope[0]))
# y = ax + b
y_cross = np.multiply(segments[j].slope[0], x_cross) + segments[j].intercept[0]
intersect = False
#if((x_cross > segments[k].min[0]) & (x_cross > segments[j].min[0])
# & (x_cross < segments[k].max[0]) & (x_cross < segments[j].max[0])):
# intersect = True
# connected[j-first,k-first] = True
# connected[k-first,j-first] = True
if(intersect == False):
#Are the ends of these lines close together?
distance = np.zeros(4)
#min -> min
distance[0] = np.sqrt(np.sum((np.power(segments[j].start[0] - segments[k].start[0],2),
np.power((segments[j].start[1] - segments[k].start[1]), 2))))
#min -> max
distance[1] = np.sqrt(np.sum((np.power((segments[j].start[0] - segments[k].end[0]),2),
np.power((segments[j].start[1] - segments[k].end[1]), 2))))
#max -> min
distance[2] = np.sqrt(np.sum((np.power((segments[j].end[0] - segments[k].start[0]),2),
np.power((segments[j].end[1] - segments[k].start[1]), 2))))
#max -> max
distance[3] = np.sqrt(np.sum((np.power((segments[j].end[0] - segments[k].end[0]),2),
np.power((segments[j].end[1] - segments[k].end[1]), 2))))
ind = np.argmin(distance)
if distance[ind] < max_extend:
if(distance[ind] == 0):
connected[j - first, k - first] = True
connected[k - first, j - first] = True
else:
changeFlag = True
switcher = {
0: [[segments[j].start[0], segments[j].start[1]], [segments[k].start[0], segments[k].start[1]]],
1: [[segments[j].start[0], segments[j].start[1]], [segments[k].end[0], segments[k].end[1]]],
2: [[segments[j].end[0], segments[j].end[1]], [segments[k].start[0], segments[k].start[1]]],
3: [[segments[j].end[0], segments[j].end[1]], [segments[k].end[0], segments[k].end[1]]],
}
data = switcher.get(ind)
connected[j - first, k - first] = True
connected[k - first, j - first] = True
segments = np.insert(segments, last, sm.line_of_best_fit(data))
segments[last].start = [data[0][0], data[0][1]]
segments[last].end = [data[1][0], data[1][1]]
segmentParent[i:] = segmentParent[i:]+1
##############################################################################
# Hypothesis 4
# proposal: A greedy search for new end-of-line connections up to a maximum of 1 connection at each end
# Followed by a greedy search for loose end-of-line connections
# result: Much tidier groups, though lines appear jittery.
# conclusion: It might be better to move nodes rather than draw new edges.
if (hypothesis == 4):
big_number = 9999999999999
max_extend = 6
connected_lines = np.zeros(last - first,dtype=bool)
connected = np.zeros((last-first, last-first),dtype=bool)
#for j in range(first, last):
# for k in range(first, last):
# if (j < k):
# First, do these lines already intersect?
#if (segments[j].slope[0] == segments[k].slope[0]):
# They never intersect, but could connect
# if(segments[j].intercept[0] == segments[k].intercept[0]):
#They are on the same line
#Only need to check x value equality, since lines are parallel
# if(((segments[j].start[0] >= segments[k].start[0])
# & (segments[j].start[0] <= segments[k].end[0]))
# ^ ((segments[j].start[0] >= segments[k].end[0])
# & (segments[j].start[0] <= segments[k].start[0]))):
## segments[j].start_connect = k
# connected[j-first, k-first] = True
## connected[k-first, j-first] = True
# if (((segments[j].end[0] >= segments[k].start[0])
# & (segments[j].end[0] <= segments[k].end[0]))
# ^ ((segments[j].end[0] >= segments[k].end[0])
# & (segments[j].end[0] <= segments[k].start[0]))):
# segments[j].end_connect = k
# connected[j-first, k-first] = True
# connected[k-first, j-first] = True
# if (((segments[k].start[0] >= segments[j].start[0])
# & (segments[k].start[0] <= segments[j].end[0]))
# ^ ((segments[k].start[0] >= segments[j].end[0])
# & (segments[k].start[0] <= segments[j].start[0]))):
# segments[k].start_connect = j
# connected[j-first, k-first] = True
# connected[k-first, j-first] = True
# if (((segments[k].end[0] >= segments[j].start[0])
### & (segments[k].end[0] <= segments[j].end[0]))
# ^ ((segments[k].end[0] >= segments[j].end[0])
# & (segments[k].end[0] <= segments[j].start[0]))):
# segments[k].end_connect = j
# connected[j-first, k-first] = True
# connected[k-first, j-first] = True#
# The next pair of conditions should NEVER occur
# However, the check has been included for sanity
# if((segments[j].end_connect == k)
# & (segments[j].start_connect == k)):
# #(Line j < Line k) ^ (Line j = Line k)
# np.delete(segments, j, 0)
# last = last - 1
# segmentParent[i:] = segmentParent[i:] + -1
# np.delete(connected_lines, j-first, 0)
# np.delete(connected, j-first, 0)
# np.delete(connected, j-first, 1)
# else:
# if ((segments[k].end_connect == j)
# & (segments[k].start_connect == j)):
# #Line k < Line j
# np.delete(segments, k, 0)
# last = last - 1
# segmentParent[i:] = segmentParent[i:] + -1
# np.delete(connected_lines, k-first, 0)
# np.delete(connected, k-first, 0)
# np.delete(connected, k-first, 1)
#The lines are not parallel, continue intersection check
#else:
# x = (b2 - b1)/(a1 - a2)
# x_cross = np.rint(np.divide(
# (segments[k].intercept[0] - segments[j].intercept[0]),
# (segments[j].slope[0] - segments[k].slope[0])))
#This introduces bugs due to errors introduced through division
#Rounding could help, but the direction of rounding would need to be know
#if ((x_cross >= segments[k].min[0]) & (x_cross >= segments[j].min[0])
# & (x_cross <= segments[k].max[0]) & (x_cross <= segments[j].max[0])):
# #Lines intersect!
# #But where...?
# if(abs(segments[k].end[0] - x_cross) < abs(segments[k].start[0] - x_cross)):
# segments[k].end_connect = j
# else:
# segments[k].start_connect = j
# if(abs(segments[j].end[0] - x_cross) < abs(segments[j].start[0] - x_cross)):
# segments[j].end_connect = k
# else:
# segments[j].start_connect = k
# connected[j-first,k-first] = True
# connected[k-first,j-first] = True
#If start and end of line is connected, then do not connect them again
#for j in range(first, last):
# if ((segments[j].start_connect >= 0) & (segments[j].end_connect >= 0)):
# connected_lines[j-first] = True
#Find lines that haven't been fully connected yet
unconnected = np.where(connected_lines == False)[0]+first
num_lines = unconnected.shape[0]
#Build adjacency matrix for lines that haven't been connected
line_adjacency = np.zeros((num_lines, num_lines,4), dtype=float)
#For lines that haven't been fully connected...
##########Calculate line end distances
for j in range(num_lines):
for k in range(num_lines):
if j < k:
#Not considering joined pairs of partially connected lines
if(connected[j,k] == True):
line_adjacency[j,k,0] = big_number
line_adjacency[j,k,1] = big_number
line_adjacency[j,k,2] = big_number
line_adjacency[j,k,3] = big_number
else:
#Measure the distance between the ends of the lines
#Ensure that lines are unconnected before measuring distance
# start -> start
line_adjacency[j,k,:] = sm.line_distances(segments[unconnected[j]],segments[unconnected[k]])
else:
if(j == k):
line_adjacency[j, k, 0] = big_number
line_adjacency[j, k, 1] = big_number
line_adjacency[j, k, 2] = big_number
line_adjacency[j, k, 3] = big_number
else:
# If line has already been processed, copy distance values
line_adjacency[j, k,0] = line_adjacency[k, j,0]
line_adjacency[j, k,1] = line_adjacency[k, j,2]
line_adjacency[j, k,2] = line_adjacency[k, j,1]
line_adjacency[j, k,3] = line_adjacency[k, j,3]
connect_flag = True
l = 0
#Whilst there are still partially connected lines less than [max_extend] distance apart
while(connect_flag == True):
#Find the shortest distance (greedy strategy)
# argmin gives flatIndex,
# use unravel_index with array shape to return 3d index
#If the shortest distance is acceptable
if line_adjacency.size == 0:
connect_flag = False
else:
j, k, l = np.unravel_index(np.argmin(line_adjacency), line_adjacency.shape)
if line_adjacency[j,k,l] < max_extend:
if(line_adjacency[j,k,l] == 0):
node = sm.attach_lines(segments[unconnected[j]], segments[unconnected[k]], l)
if (node.id >= num_nodes):
nodes.append(node)
num_nodes += 1
connected[k, j] = True
connected[j, k] = True
line_adjacency[j, k, :] = big_number
line_adjacency[k, j, :] = big_number
else:
#Create a new line to bridge the distance
segments = np.insert(segments, last,
sm.connect_lines(segments[unconnected[j]], segments[unconnected[k]], l))
if (segments[last].nodes[0] is not None):
if (segments[last].nodes[0].id >= num_nodes):
nodes.append(segments[last].nodes[0])
num_nodes += 1
if (segments[last].nodes[1] is not None):
if (segments[last].nodes[1].id >= num_nodes):
nodes.append(segments[last].nodes[1])
num_nodes += 1
segmentParent[i:] = segmentParent[i:] + 1
connected = np.hstack((connected, np.zeros((last-first, 1), dtype=bool)))
connected = np.vstack((connected, np.zeros((1,last-first+1), dtype=bool)))
connected[k, last-first] = True
connected[j, last-first] = True
connected[last-first, k] = True
connected[last-first, j] = True
connected[k,j] = True
connected[j,k] = True
line_adjacency[j, k, :] = big_number
line_adjacency[k, j, :] = big_number
#Adjacency switcher is used to select relevant line_adjacency values
#For each 'connection made type' row:
#First values identify connections types that line1 can no longer make
#Second values identify connections types that line2 can no longer make
#Third values identify connections types that j can no longer receive
#Fourth values identify connections types that k can no longer receive
adjacency_switcher = {
0: [[0, 1],[0, 1],[0, 2],[0, 2]], #Type start->start
1: [[0, 1],[2, 3],[0, 2],[1, 3]], #Type start->end
2: [[2, 3],[0, 1],[1, 3],[0, 2]], #Type end->start
3: [[2, 3],[2, 3],[1, 3],[1, 3]], #Type end->end
}
inds = adjacency_switcher[l]
line_adjacency[j,:,inds[0]] = big_number
line_adjacency[k,:,inds[1]] = big_number
line_adjacency[:,j,inds[2]] = big_number
line_adjacency[:,k,inds[3]] = big_number
last = last + 1
diff = 0
if ((segments[unconnected[j]].start_connect >= 0) & (segments[unconnected[j]].end_connect >= 0)):
connected_lines[j] = True
unconnected = np.delete(unconnected, j, 0)
line_adjacency = np.delete(line_adjacency, j, 0)
line_adjacency = np.delete(line_adjacency, j, 1)
num_lines = num_lines - 1
if k > j:
diff = 1
if ((segments[unconnected[k-diff]].start_connect >= 0) & (segments[unconnected[k-diff]].end_connect >= 0)):
connected_lines[k] = True
unconnected = np.delete(unconnected, k-diff, 0)
line_adjacency = np.delete(line_adjacency, k-diff, 0)
line_adjacency = np.delete(line_adjacency, k-diff, 1)
num_lines = num_lines - 1
else:
connect_flag = False
#Now there are only partially connected lines remaining
#We should see if these can connect to any nearby lines
num_remain = unconnected.shape[0]
#unconnected have been being deleted upon full-connection during previous step
line_adjacency = np.zeros((last-first, 4))
#max_extend = 10
for j in range(num_remain):
for k in range(last-first):
#Cannot connect to self
if(unconnected[j] == k+first):
line_adjacency[k, :] = big_number
else:
#Cannot reconnect over previously connections
if(connected[unconnected[j]-first,k] == True):
line_adjacency[k,:] = big_number
else:
#Measure distance to all other ends of lines
if(segments[unconnected[j]].start_connect < 0):
line_adjacency[k, 0] = sm.point_distance(segments[unconnected[j]].start,segments[k+first].start)
line_adjacency[k, 1] = sm.point_distance(segments[unconnected[j]].start,segments[k+first].end)
else:
line_adjacency[k, 0] = big_number
line_adjacency[k, 1] = big_number
if(segments[unconnected[j]].end_connect < 0):
line_adjacency[k, 2] = sm.point_distance(segments[unconnected[j]].end,segments[k+first].start)
line_adjacency[k, 3] = sm.point_distance(segments[unconnected[j]].end,segments[k+first].end)
else:
line_adjacency[k, 2] = big_number
line_adjacency[k, 3] = big_number
# sm.line_distances(segments[unconnected[j]],segments[k+first])
k, l = np.unravel_index(np.argmin(line_adjacency), line_adjacency.shape)
#If shortest distance is below threshold, make connection
if line_adjacency[k,l] < max_extend:
if (line_adjacency[k,l] == 0): #If shortest distance indicates prior connection, form connection formally
connected[unconnected[j] - first, k] = True
connected[k, unconnected[j] - first] = True
node = sm.attach_lines(segments[unconnected[j]], segments[k+first], l)
if (node.id >= num_nodes):
nodes.append(node)
num_nodes += 1
else:
changeFlag = True
segments = np.insert(segments, last,
sm.connect_lines(segments[unconnected[j]], segments[k+first], l))
if (segments[last].nodes[0] is not None):
if (segments[last].nodes[0].id >= num_nodes):
nodes.append(segments[last].nodes[0])
num_nodes += 1
if (segments[last].nodes[1] is not None):
if (segments[last].nodes[1].id >= num_nodes):
nodes.append(segments[last].nodes[1])
num_nodes += 1
connected[unconnected[j] - first, k] = True
connected[k, unconnected[j] - first] = True
segmentParent[i:] = segmentParent[i:] + 1
connected = np.hstack((connected, np.zeros((last - first, 1), dtype=bool)))
connected = np.vstack((connected, np.zeros((1, last - first + 1), dtype=bool)))
connected[k, last-first] = True
connected[unconnected[j]-first, last-first] = True
connected[last-first, k] = True
connected[last-first, unconnected[j]-first] = True
line_adjacency[k, :] = big_number
if((k+first) in unconnected):
line_adjacency[np.where(unconnected==(k+first))[0]] = big_number
line_adjacency = np.vstack((line_adjacency, np.multiply(np.ones((1,4)),big_number)))
last = last + 1
#print(checkCycles(segments[first:last]))
#plt.axes(axarr[1])
#axarr[1].imshow(image, cmap=plt.cm.gray)
#for m in range(first):
# plt.plot([segments[m].start[1], segments[m].end[1]], [segments[m].start[0], segments[m].end[0]], 'r-')
#
# for m in range(first,last):
# plt.plot([segments[m].start[1], segments[m].end[1]], [segments[m].start[0], segments[m].end[0]], 'g-')
graph = sm.getEdges(segments[first:last])
nodes2 = sm.getNodes(segments[first:last])
cycles = sm.find_nxCycle(graph)
#print(cycles)
cycles = sm.mergeCycles(cycles)
boxes = sm.findBounds(cycles, nodes2)
for box in boxes:
coord, width, height = sm.boxToMatplotPatch(box)
# axarr[1].add_patch(
# patches.Rectangle(
# coord, width, height,#(x,y), width, height
# fill=False
# )
# )
im = image[
int(np.maximum(0, coord[1] - np.floor_divide(height, 4))):int(
np.minimum(coord[1] + height + np.floor_divide(height, 4), edges.shape[0])),
int(np.maximum(0, coord[0] - np.floor_divide(width, 4))):int(
np.minimum(coord[0] + width + np.floor_divide(width, 4), edges.shape[1]))]
if ((width >= 20) & (height >= 20)):
# Big enough for 1px DEM
if ((width > 160) & (height > 160)):
# Big enough for 8px DEM
# coord = x1,y1
# x2 = x1+width
# y2 = y1+height
filename = base_folder + "craters/" + base_filename + '_big_crater' + str(
num_bigcraters) + '_at_x' + str(int(np.floor_divide(coord[0]))) + 'w' + str(int(np.floor_divide(width))) + 'at_y' + str(
int(np.floor_divide(coord[1]))) + 'h' + str(int(np.floor_divide(height)))
num_bigcraters = num_bigcraters + 1
else:
filename = base_folder + "craters/" + base_filename + '_crater' + str(
num_craters) + '_at_x' + str(int(np.floor_divide(coord[0],1))) + 'w' + str(int(np.floor_divide(width))) + 'at_y' + str(
int(np.floor_divide(coord[1]))) + 'h' + str(int(np.floor_divide(height)))
num_craters = num_craters + 1
else:
filename = base_folder + "craters/" + base_filename + '_little_crater' + str(
num_lil_craters) + '_at_x' + str(int(np.floor_divide(coord[0]))) + 'w' + str(int(np.floor_divide(width)))+ 'at_y' + str(
int(np.floor_divide(coord[1]))) + 'h' + str(int(np.floor_divide(height)))
num_lil_craters = num_lil_craters + 1
im2 = Image.fromarray(im)
im2.save(filename + '.png')
#cycles = sm.findCycles(drawGraph(segments[first:last]))
#if (len(cycles) > 0):
# print(cycles)
#y1 = (np.multiply(line.slope, minX) + line.intercept)[0][0]
#a = np.divide(np.ones(len(line.slope)), line.slope)
#b = y1 - np.multiply(a, minX)
#x2 = np.divide(line.intercept - b, a - line.slope)
#y2 = (line.slope * x2) + line.intercept
#if x2 < minX:
# minX = x2
#if y2 < minY:
# minY = y2
#x1 = (np.divide((minY - line.intercept),line.slope))[0][0]
#y1 = (np.multiply(line.slope, minX) + line.intercept)[0][0]
#x2 = (np.divide((maxY - line.intercept), line.slope))[0][0]
#y2 = (np.multiply(line.slope, maxX) + line.intercept)[0][0]
#if(y1 > minY):
# y1 = minY
#x1 = minX
#y2 = (np.multiply(line.slope, maxX) + line.intercept)[0][0]
#if(y2 < maxY):
# y2 = maxY
#x2 = maxX
#for line in segments:
# If negative correlation, then [minX, maxY], [maxX, minY]
# plt.plot([line.start[1], line.end[1]], [line.start[0], line.end[0]], 'r-')
#if (line.slope[0] > 0):
# plt.plot([line.min[1], line.max[1]], [line.min[0], line.max[0]], 'r-')
#else:
# plt.plot([line.min[1], line.max[1]], [line.max[0], line.min[0]], 'r-')
print("end") | gpl-3.0 |
dsm054/pandas | pandas/tests/indexes/test_base.py | 1 | 101055 | # -*- coding: utf-8 -*-
import math
import operator
from collections import defaultdict
from datetime import datetime, timedelta
from decimal import Decimal
import numpy as np
import pytest
import pandas as pd
import pandas.core.config as cf
import pandas.util.testing as tm
from pandas import (
CategoricalIndex, DataFrame, DatetimeIndex, Float64Index, Int64Index,
PeriodIndex, RangeIndex, Series, TimedeltaIndex, UInt64Index, date_range,
isna, period_range
)
from pandas._libs.tslib import Timestamp
from pandas.compat import (
PY3, PY35, PY36, StringIO, lrange, lzip, range, text_type, u, zip
)
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas.core.dtypes.generic import ABCIndex
from pandas.core.index import _get_combined_index, ensure_index_from_sequences
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.indexes.datetimes import _to_m8
from pandas.tests.indexes.common import Base
from pandas.util.testing import assert_almost_equal
class TestIndex(Base):
_holder = Index
def setup_method(self, method):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
uintIndex=tm.makeUIntIndex(100),
rangeIndex=tm.makeRangeIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])),
repeats=Index([0, 0, 1, 1, 2, 2]))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def generate_index_types(self, skip_index_keys=()):
"""
Return a generator of the various index types, leaving
out the ones with a key in skip_index_keys
"""
for key, index in self.indices.items():
if key not in skip_index_keys:
yield key, index
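# Usage sketch (hypothetical): iterate over every fixture except the
# MultiIndex one.
#   for key, index in self.generate_index_types(skip_index_keys=['tuples']):
#       assert not isinstance(index, MultiIndex)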
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
def test_new_axis(self):
new_index = self.dateIndex[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self, indices):
super(TestIndex, self).test_copy_and_deepcopy(indices)
new_copy2 = self.intIndex.copy(dtype=int)
assert new_copy2.dtype.kind == 'i'
@pytest.mark.parametrize("attr", ['strIndex', 'dateIndex'])
def test_constructor_regular(self, attr):
# regular instance creation
index = getattr(self, attr)
tm.assert_contains_all(index, index)
def test_constructor_casting(self):
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
def test_constructor_copy(self):
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
assert isinstance(index, Index)
assert index.name == 'name'
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert index[0] != "SOMEBIGLONGSTRING"
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
pytest.raises(TypeError, Index, 0)
@pytest.mark.parametrize("index_vals", [
[('A', 1), 'B'], ['B', ('A', 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
@pytest.mark.parametrize('na_value', [None, np.nan])
@pytest.mark.parametrize('vtype', [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH 18505 : valid tuples containing NaN
values = [(1, 'two'), (3., na_value)]
result = Index(vtype(values))
expected = MultiIndex.from_tuples(values)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize("index", [
pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern', name='Green Eggs & Ham'), # DTI with tz
pd.date_range('2015-01-01 10:00', freq='D', periods=3), # DTI no tz
pd.timedelta_range('1 days', freq='D', periods=3), # td
pd.period_range('2015-01-01', freq='D', periods=3) # period
])
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex):
assert result.tz == index.tz
if cast_as_obj:
# GH#23524 check that Index(dti, dtype=object) does not
# incorrectly raise ValueError, and that nanoseconds are not
# dropped
index += pd.Timedelta(nanoseconds=50)
result = pd.Index(index, dtype=object)
assert result.dtype == np.object_
assert list(result) == list(index)
@pytest.mark.parametrize("index,has_tz", [
(pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern'), True), # datetimetz
(pd.timedelta_range('1 days', freq='D', periods=3), False), # td
(pd.period_range('2015-01-01', freq='D', periods=3), False) # period
])
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
@pytest.mark.parametrize("klass", [Index, DatetimeIndex])
def test_constructor_from_series(self, klass):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = klass(s)
tm.assert_index_equal(result, expected)
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq='MS')
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
expected = DatetimeIndex(dts, freq='MS')
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = dts
result = DatetimeIndex(df['date'], freq='MS')
assert df['date'].dtype == object
expected.name = 'date'
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name='date')
tm.assert_series_equal(df['date'], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df['date'])
assert freq == 'MS'
@pytest.mark.parametrize("array", [
np.arange(5), np.array(['a', 'b', 'c']), date_range(
'2000-01-01', periods=3).values
])
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dtype', [
int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32',
'uint16', 'uint8'])
def test_constructor_int_dtype_float(self, dtype):
# GH 18400
if is_unsigned_integer_dtype(dtype):
index_type = UInt64Index
else:
index_type = Int64Index
expected = index_type([0, 1, 2, 3])
result = Index([0., 1., 2., 3.], dtype=dtype)
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype='float')
tm.assert_index_equal(result, expected)
def test_droplevel(self, indices):
# GH 21115
if isinstance(indices, MultiIndex):
# Tested separately in test_multi.py
return
assert indices.droplevel([]).equals(indices)
for level in indices.name, [indices.name]:
if isinstance(indices.name, tuple) and level is indices.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
indices.droplevel(level)
for level in 'wrong', ['wrong']:
with pytest.raises(KeyError):
indices.droplevel(level)
@pytest.mark.parametrize("dtype", ['int64', 'uint64'])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
@pytest.mark.parametrize("klass,dtype,na_val", [
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, 'datetime64[ns]', pd.NaT)
])
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos", [0, 1])
@pytest.mark.parametrize("klass,dtype,ctor", [
(pd.DatetimeIndex, 'datetime64[ns]', np.datetime64('nat')),
(pd.TimedeltaIndex, 'timedelta64[ns]', np.timedelta64('nat'))
])
def test_index_ctor_infer_nat_dt_like(self, pos, klass, dtype, ctor,
nulls_fixture):
expected = klass([pd.NaT, pd.NaT])
assert expected.dtype == dtype
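# Whichever position the second null occupies, mixing a dt64/td64 NaT
# with any other null value should still infer the datetime-like index.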
data = [ctor]
data.insert(pos, nulls_fixture)
result = Index(data)
tm.assert_index_equal(result, expected)
result = Index(np.array(data, dtype=object))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("swap_objs", [True, False])
def test_index_ctor_nat_result(self, swap_objs):
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
if swap_objs:
data = data[::-1]
expected = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), expected)
tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
assert isinstance(rs, PeriodIndex)
@pytest.mark.parametrize("vals,dtype", [
([1, 2, 3, 4, 5], 'int'), ([1.1, np.nan, 2.2, 3.0], 'float'),
(['A', 'B', 'C', np.nan], 'obj')
])
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3]), np.array([1, 2, 3], dtype=int),
# below should coerce
[1., 2., 3.], np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_int64(self, vals):
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize("vals", [
[1, 2, 3], [1., 2., 3.], np.array([1., 2., 3.]),
np.array([1, 2, 3], dtype=int), np.array([1., 2., 3.], dtype=float)
])
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
[True, False, True], np.array([True, False, True], dtype=bool)
])
def test_constructor_dtypes_to_object(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=bool)
else:
index = Index(vals)
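# Either way, boolean data is held as a plain object-dtype Index here;
# the bool dtype is not preserved by the Index constructor.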
assert isinstance(index, Index)
assert index.dtype == object
@pytest.mark.parametrize("vals", [
[1, 2, 3], np.array([1, 2, 3], dtype=int),
np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
[datetime(2011, 1, 1), datetime(2011, 1, 2)]
])
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype='category')
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])
])
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize("vals", [
np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]),
[timedelta(1), timedelta(1)]
])
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr, utc", [
['values', False],
['asi8', True]])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc,
klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
index = pd.date_range('2011-01-01', periods=5)
arg = getattr(index, attr)
if utc:
index = index.tz_localize('UTC').tz_convert(tz_naive_fixture)
else:
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ['values', 'asi8'])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range('1 days', periods=5)
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (x for x in [])])
@pytest.mark.parametrize("klass",
[Index, Float64Index, Int64Index, UInt64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex])
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize("empty,klass", [
(PeriodIndex([], freq='B'), PeriodIndex),
(PeriodIndex(iter([]), freq='B'), PeriodIndex),
(PeriodIndex((x for x in []), freq='B'), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ['blue', 'red']],
labels=[[], []]), MultiIndex)
])
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_non_hashable_name(self, indices):
# GH 20527
if isinstance(indices, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [['1']]
# With .rename()
with pytest.raises(TypeError, match=message):
indices.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
indices.set_names(names=renamed)
def test_constructor_overflow_int64(self):
# see gh-15832
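# np.uint64 max - 1 does not fit into int64, so the requested cast
# must raise rather than silently wrap around.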
msg = ("The elements provided in the data cannot "
"all be casted to the dtype int64")
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
@pytest.mark.xfail(reason="see GH#21311: Index "
"doesn't enforce dtype argument",
strict=True)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
pytest.raises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
assert casted.name == 'foobar'
def test_equals_object(self):
# same
assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))
@pytest.mark.parametrize("comp", [
Index(['a', 'b']), Index(['a', 'b', 'd']), ['a', 'b', 'c']])
def test_not_equals_object(self, comp):
assert not Index(['a', 'b', 'c']).equals(comp)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test insertion at positions other than 0 and -1, positive and negative
tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_insert_missing(self, nulls_fixture):
# GH 22295
# test there is no mangling of NA values
expected = Index(['a', nulls_fixture, 'b', 'c'])
result = Index(list('abc')).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("pos,expected", [
(0, Index(['b', 'c', 'd'], name='index')),
(-1, Index(['a', 'b', 'c'], name='index'))
])
def test_delete(self, pos, expected):
index = Index(['a', 'b', 'c', 'd'], name='index')
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_delete_raises(self):
index = Index(['a', 'b', 'c', 'd'], name='index')
with pytest.raises((IndexError, ValueError)):
# either depending on numpy version
index.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
assert i1.identical(i2)
i1 = i1.rename('foo')
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename('foo')
assert i1.identical(i2)
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = 'bob'
assert ind.is_(ind2)
assert ind2.is_(ind)
# it doesn't matter whether Indices are *actually* views of the underlying data
assert not ind.is_(Index(ind.values))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
def test_asof(self):
d = self.dateIndex[0]
assert self.dateIndex.asof(d) == d
assert isna(self.dateIndex.asof(d - timedelta(1)))
d = self.dateIndex[-1]
assert self.dateIndex.asof(d + timedelta(1)) == d
d = self.dateIndex[0].to_pydatetime()
assert isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = index.asof('2010-02')
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+'
'0000', 'ns')
assert first_value == x[Timestamp(expected_ts)]
@pytest.mark.parametrize("op", [
operator.eq, operator.ne, operator.gt, operator.lt,
operator.ge, operator.le
])
def test_comparators(self, op):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
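# _to_m8 converts the Timestamp element to a raw np.datetime64, so the
# numpy array and the Index are compared against the same scalar type.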
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
def test_booleanindex(self):
boolIndex = np.repeat(True, len(self.strIndex)).astype(bool)
boolIndex[5:30:2] = False
subIndex = self.strIndex[boolIndex]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
subIndex = self.strIndex[list(boolIndex)]
for i, val in enumerate(subIndex):
assert subIndex.get_loc(val) == i
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, attr, dtype):
empty_arr = np.array([], dtype=dtype)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("attr", [
'strIndex', 'intIndex', 'floatIndex'])
def test_empty_fancy_raises(self, attr):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
index = getattr(self, attr)
empty_index = index.__class__([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts indexing with ndarrays of int and bool dtype, and so should Index
pytest.raises(IndexError, index.__getitem__, empty_farr)
@pytest.mark.parametrize("itm", [101, 'no_int'])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, indices, itm):
with pytest.raises(IndexError):
indices[itm]
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first)
assert inter is first
@pytest.mark.parametrize("index2,keeps_name", [
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False)])
def test_intersection_name_preservation(self, index2, keeps_name):
index1 = Index([1, 2, 3, 4, 5], name='index')
expected = Index([3, 4, 5])
result = index1.intersection(index2)
if keeps_name:
expected.name = 'index'
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("first_name,second_name,expected_name", [
('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)])
def test_intersection_name_preservation2(self, first_name, second_name,
expected_name):
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second)
assert intersect.name == expected_name
@pytest.mark.parametrize("index2,keeps_name", [
(Index([4, 7, 6, 5, 3], name='index'), True),
(Index([4, 7, 6, 5, 3], name='other'), False)])
def test_intersection_monotonic(self, index2, keeps_name):
index1 = Index([5, 3, 2, 4, 1], name='index')
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index2,expected_arr", [
(Index(['B', 'D']), ['B']),
(Index(['B', 'D', 'A']), ['A', 'B', 'A'])])
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr):
# non-monotonic non-unique
index1 = Index(['A', 'B', 'A', 'C'])
expected = Index(expected_arr, dtype='object')
result = index1.intersection(index2)
tm.assert_index_equal(result, expected)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(['aa'], dtype=object)
result = i2.intersection(i1)
assert len(result) == 0
@pytest.mark.parametrize(
'fname, sname, expected_name',
[
('A', 'A', 'A'),
('A', 'B', None),
('A', None, None),
(None, 'B', None),
(None, None, None),
])
def test_corner_union(self, indices, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(indices, MultiIndex) or not indices.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = indices.copy().set_names(fname)
second = indices.copy().set_names(sname)
union = first.union(second)
expected = indices.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = indices.copy().set_names(fname)
second = indices.drop(indices).set_names(sname)
union = first.union(second)
expected = indices.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = indices.drop(indices).set_names(fname)
second = indices.copy().set_names(sname)
union = first.union(second)
expected = indices.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = indices.drop(indices).set_names(fname)
second = indices.drop(indices).set_names(sname)
union = first.union(second)
expected = indices.drop(indices).set_names(expected_name)
tm.assert_index_equal(union, expected)
def test_chained_union(self):
# Chained unions handles names correctly
i1 = Index([1, 2], name='i1')
i2 = Index([3, 4], name='i2')
i3 = Index([5, 6], name='i3')
union = i1.union(i2.union(i3))
expected = i1.union(i2).union(i3)
tm.assert_index_equal(union, expected)
j1 = Index([1, 2], name='j1')
j2 = Index([], name='j2')
j3 = Index([], name='j3')
union = j1.union(j2.union(j3))
expected = j1.union(j2).union(j3)
tm.assert_index_equal(union, expected)
def test_union(self):
# TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
assert tm.equalContents(union, everything)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_from_iterables(self, klass):
# GH 10149
# TODO: Replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
case = klass(second.values)
result = first.union(case)
assert tm.equalContents(result, everything)
def test_union_identity(self):
# TODO: replace with fixture
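# Union with itself, with an empty list, or of an empty Index with a
# non-empty one should return the very same object, not a copy.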
first = self.strIndex[5:20]
union = first.union(first)
assert union is first
union = first.union([])
assert union is first
union = Index([]).union(first)
assert union is first
@pytest.mark.parametrize("first_list", [list('ab'), list()])
@pytest.mark.parametrize("second_list", [list('ab'), list()])
@pytest.mark.parametrize("first_name, second_name, expected_name", [
('A', 'B', None), (None, 'B', None), ('A', None, None)])
def test_union_name_preservation(self, first_list, second_list, first_name,
second_name, expected_name):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second)
vals = sorted(set(first_list).union(second_list))
expected = Index(vals, name=expected_name)
tm.assert_index_equal(union, expected)
def test_union_dt_as_obj(self):
# TODO: Replace with fixture
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
assert tm.equalContents(firstCat, appended)
assert tm.equalContents(secondCat, self.strIndex)
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
index = self.strIndex
expected = Index(self.strIndex.values * 2)
tm.assert_index_equal(index + index, expected)
tm.assert_index_equal(index + index.tolist(), expected)
tm.assert_index_equal(index.tolist() + index, expected)
# test add and radd
index = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
tm.assert_index_equal(index + '1', expected)
expected = Index(['1a', '1b', '1c'])
tm.assert_index_equal('1' + index, expected)
def test_sub_fail(self):
index = self.strIndex
pytest.raises(TypeError, lambda: index - 'a')
pytest.raises(TypeError, lambda: index - index)
pytest.raises(TypeError, lambda: index - index.tolist())
pytest.raises(TypeError, lambda: index.tolist() - index)
def test_sub_object(self):
# GH#19369
index = pd.Index([Decimal(1), Decimal(2)])
expected = pd.Index([Decimal(0), Decimal(1)])
result = index - Decimal(1)
tm.assert_index_equal(result, expected)
result = index - pd.Index([Decimal(1), Decimal(1)])
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
index - 'foo'
with pytest.raises(TypeError):
index - np.array([2, 'foo'])
def test_rsub_object(self):
# GH#19369
index = pd.Index([Decimal(1), Decimal(2)])
expected = pd.Index([Decimal(1), Decimal(0)])
result = Decimal(2) - index
tm.assert_index_equal(result, expected)
result = np.array([Decimal(2), Decimal(2)]) - index
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
'foo' - index
with pytest.raises(TypeError):
np.array([True, pd.Timestamp.now()]) - index
def test_map_identity_mapping(self):
# GH 12766
# TODO: replace with fixture
for name, cur_index in self.indices.items():
tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ['foo', 'bar', 'baz']
multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize("attr", [
'makeDateIndex', 'makePeriodIndex', 'makeTimedeltaIndex'])
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accessor_return_index(self):
date_index = tm.makeDateIndex(24, freq='h', name='hourly')
expected = Index(range(24), name='hourly')
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
# GH 12756
expected = Index(['foo', 'bar', 'baz'])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
# TODO: replace with fixture
for name in self.indices.keys():
if name == 'catIndex':
# Tested in test_categorical
continue
elif name == 'repeats':
# Cannot map duplicated index
continue
index = self.indices[name]
expected = Index(np.arange(len(index), 0, -1))
# to match proper result coercion for uints
if name == 'empty':
expected = Index([])
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("mapper", [
Series(['foo', 2., 'baz'], index=[0, 2, -1]),
{0: 'foo', 2: 2.0, -1: 'baz'}])
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2., np.nan, 'foo'])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action='ignore')
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = index.map(default_dict)
expected = Index(['stuff', 'blank', 'blank'])
tm.assert_index_equal(result, expected)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("name,expected", [
('foo', 'foo'), ('bar', None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name='foo')
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
assert 'a' not in index2
assert 'afoo' in index2
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
assert 'a' in index
index += '_x'
assert 'a_x' in index
@pytest.mark.parametrize("second_name,expected", [
(None, None), ('name', 'name')])
def test_difference_name_preservation(self, second_name, expected):
# TODO: replace with fixture
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
second.name = second_name
result = first.difference(second)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
def test_difference_empty_arg(self):
first = self.strIndex[5:20]
first.name = 'name'
result = first.difference([])
assert tm.equalContents(result, first)
assert result.name == first.name
def test_difference_identity(self):
first = self.strIndex[5:20]
first.name = 'name'
result = first.difference(first)
assert len(result) == 0
assert result.name == first.name
def test_symmetric_difference(self):
# smoke
index1 = Index([1, 2, 3, 4], name='index1')
index2 = Index([2, 3, 4, 5])
result = index1.symmetric_difference(index2)
expected = Index([1, 5])
assert tm.equalContents(result, expected)
assert result.name is None
# __xor__ syntax
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
def test_symmetric_difference_mi(self):
index1 = MultiIndex.from_tuples(self.tuples)
index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = index1.symmetric_difference(index2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
assert tm.equalContents(result, expected)
@pytest.mark.parametrize("index2,expected", [
(Index([0, 1, np.nan]), Index([0.0, 2.0, 3.0])),
(Index([0, 1]), Index([0.0, 2.0, 3.0, np.nan]))])
def test_symmetric_difference_missing(self, index2, expected):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2)
tm.assert_index_equal(result, expected)
def test_symmetric_difference_non_index(self):
index1 = Index([1, 2, 3, 4], name='index1')
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2)
assert tm.equalContents(result, expected)
assert result.name == 'index1'
result = index1.symmetric_difference(index2, result_name='new_name')
assert tm.equalContents(result, expected)
assert result.name == 'new_name'
def test_difference_type(self):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
result = index.difference(index)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
def test_intersection_difference(self):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
skip_index_keys = ['repeats']
for key, index in self.generate_index_types(skip_index_keys):
inter = index.intersection(index.drop(index))
diff = index.difference(index)
tm.assert_index_equal(inter, diff)
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', True), ('dateIndex', False), ('floatIndex', True)])
def test_is_numeric(self, attr, expected):
assert getattr(self, attr).is_numeric() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', True), ('boolIndex', True), ('catIndex', False),
('intIndex', False), ('dateIndex', False), ('floatIndex', False)])
def test_is_object(self, attr, expected):
assert getattr(self, attr).is_object() == expected
@pytest.mark.parametrize("attr,expected", [
('strIndex', False), ('boolIndex', False), ('catIndex', False),
('intIndex', False), ('dateIndex', True), ('floatIndex', False)])
def test_is_all_dates(self, attr, expected):
assert getattr(self, attr).is_all_dates == expected
def test_summary(self):
self._check_method_works(Index._summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind._summary()
# shouldn't be formatted accidentally.
assert '~:{range}:0' in result
assert '{other}%s' in result
# GH18217
def test_summary_deprecated(self):
ind = Index(['{other}%s', "~:{range}:0"], name='A')
with tm.assert_produces_warning(FutureWarning):
ind.summary()
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
# Windows has different precision on datetime.datetime.now (it doesn't
# include microseconds). The default Timestamp repr shows these, but
# Index formatting does not, so we skip the check in that case.
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
self.strIndex[:0].format()
@pytest.mark.parametrize("vals", [
[1, 2.0 + 3.0j, 4.], ['a', 'b', 'c']])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), u('NaN')]
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
assert formatted[0] == 'something'
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ['any', 'all'])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method):
# TODO: make this a dedicated test with parametrized methods
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("expected,method", [
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'pad'),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), 'ffill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'backfill'),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), 'bfill')])
def test_get_indexer_methods(self, reverse, expected, method):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
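# pad/ffill map each target to the last index2 position <= the target;
# backfill/bfill map to the first position >= it; -1 marks no match.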
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match='tolerance argument'):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
'method, tolerance, indexer, expected',
[
('pad', None, [0, 5, 9], [0, 5, 9]),
('backfill', None, [0, 5, 9], [0, 5, 9]),
('nearest', None, [0, 5, 9], [0, 5, 9]),
('pad', 0, [0, 5, 9], [0, 5, 9]),
('backfill', 0, [0, 5, 9], [0, 5, 9]),
('nearest', 0, [0, 5, 9], [0, 5, 9]),
('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),
('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),
('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),
('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
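# tolerance bounds how far a matched label may lie from the target;
# targets with no label within tolerance get -1 in the indexer.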
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
@pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])
@pytest.mark.parametrize(
'tolerance, expected',
list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],
[0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1],
[-1, 2, 9]])))
def test_get_indexer_nearest_listlike_tolerance(self, tolerance,
expected, listtype):
index = Index(np.arange(10))
actual = index.get_indexer([0.2, 1.8, 8.5], method='nearest',
tolerance=listtype(tolerance))
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match='limit argument'):
index.get_indexer([1, 0], method='nearest', limit=1)
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_indexer([1, 0], method='nearest',
tolerance=[1, 2, 3])
@pytest.mark.parametrize("method,expected", [
('pad', [8, 7, 0]), ('backfill', [9, 8, 1]), ('nearest', [9, 7, 0])])
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("method,expected", [
('pad', np.array([-1, 0, 1, 1], dtype=np.intp)),
('backfill', np.array([0, 0, 1, -1], dtype=np.intp))])
def test_get_indexer_strings(self, method, expected):
index = pd.Index(['b', 'c'])
actual = index.get_indexer(['a', 'b', 'c', 'd'], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(['b', 'c'])
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
with pytest.raises(TypeError):
index.get_indexer(['a', 'b', 'c', 'd'], method='pad',
tolerance=[2, 2, 2, 2])
def test_get_indexer_numeric_index_boolean_target(self):
# GH 16877
numeric_index = pd.Index(range(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_with_NA_values(self, unique_nulls_fixture,
unique_nulls_fixture2):
# GH 22332
# check pairwise, that no pair of na values
# is mangled
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values are not unique
arr = np.array([unique_nulls_fixture,
unique_nulls_fixture2], dtype=np.object)
index = pd.Index(arr, dtype=np.object)
result = index.get_indexer([unique_nulls_fixture,
unique_nulls_fixture2, 'Unknown'])
expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, 'pad', 'backfill', 'nearest'])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
# Messages vary across versions
if PY36:
msg = 'not supported between'
elif PY35:
msg = 'unorderable types'
else:
if method == 'nearest':
msg = 'unsupported operand'
else:
msg = 'requires scalar valued input'
else:
msg = 'invalid key'
with pytest.raises(TypeError, match=msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize("method,loc", [
('pad', 1), ('backfill', 2), ('nearest', 1)])
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
@pytest.mark.parametrize("method", ['pad', 'backfill', 'nearest'])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with pytest.raises(KeyError, match='1.1'):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='must be numeric'):
index.get_loc(1.1, 'nearest', tolerance='invalid')
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance .* valid if'):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match='tolerance size must match'):
index.get_loc(1.1, 'nearest', tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='nearest')
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(['a', 'c'])
with pytest.raises(TypeError, match='unsupported operand type'):
index.get_loc('a', method='pad', tolerance='invalid')
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
def test_slice_float_locs(self):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
@pytest.mark.xfail(reason="Assertions were not correct - see GH#20915",
strict=True)
def test_slice_ints_with_floats_raises(self):
# int slicing with floats
# GH 4892, these are all TypeErrors
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
pytest.raises(TypeError,
lambda: index.slice_locs(5.0, 10.0))
pytest.raises(TypeError,
lambda: index.slice_locs(4.5, 10.5))
index2 = index[::-1]
pytest.raises(TypeError,
lambda: index2.slice_locs(8.5, 1.5))
pytest.raises(TypeError,
lambda: index2.slice_locs(10.5, -1))
def test_slice_locs_dup(self):
index = Index(['a', 'a', 'b', 'c', 'd', 'd'])
assert index.slice_locs('a', 'd') == (0, 6)
assert index.slice_locs(end='d') == (0, 6)
assert index.slice_locs('a', 'c') == (0, 4)
assert index.slice_locs('b', 'd') == (2, 6)
index2 = index[::-1]
assert index2.slice_locs('d', 'a') == (0, 6)
assert index2.slice_locs(end='a') == (0, 6)
assert index2.slice_locs('d', 'b') == (0, 4)
assert index2.slice_locs('c', 'a') == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=''):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=''):
index.slice_locs(end=1.5)
@pytest.mark.parametrize("in_slice,expected", [
(pd.IndexSlice[::-1], 'yxdcb'), (pd.IndexSlice['b':'y':-1], ''),
(pd.IndexSlice['b'::-1], 'b'), (pd.IndexSlice[:'b':-1], 'yxdcb'),
(pd.IndexSlice[:'y':-1], 'y'), (pd.IndexSlice['y'::-1], 'yxdcb'),
(pd.IndexSlice['y'::-4], 'yb'),
# absent labels
(pd.IndexSlice[:'a':-1], 'yxdcb'), (pd.IndexSlice[:'a':-2], 'ydb'),
(pd.IndexSlice['z'::-1], 'yxdcb'), (pd.IndexSlice['z'::-3], 'yc'),
(pd.IndexSlice['m'::-1], 'dcb'), (pd.IndexSlice[:'m':-1], 'yx'),
(pd.IndexSlice['a':'a':-1], ''), (pd.IndexSlice['z':'z':-1], ''),
(pd.IndexSlice['m':'m':-1], '')
])
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list('bcdxy'))
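# slice_locs translates the label-based slice into positional bounds;
# indexing with those bounds and the original step must reproduce the
# label-sliced result, including for labels absent from the index.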
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = index[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
def test_drop_by_str_label(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("keys", [['foo', 'bar'], ['1', 'bar']])
def test_drop_by_str_label_raises_missing_keys(self, keys):
with pytest.raises(KeyError, match=''):
self.strIndex.drop(keys)
def test_drop_by_str_label_errors_ignore(self):
# TODO: Parametrize these after replacing self.strIndex with fixture
# errors='ignore'
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
tm.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with pytest.raises(KeyError, match=''):
index.drop([3, 4])
@pytest.mark.parametrize("key,expected", [
(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))])
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors='ignore')
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("values", [['a', 'b', ('c', 'd')],
['a', ('c', 'd'), 'b'],
[('c', 'd'), 'a', 'b']])
@pytest.mark.parametrize("to_drop", [[('c', 'd'), 'a'], ['a', ('c', 'd')]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(['b'])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
for drop_me in to_drop[1], [to_drop[1]]:
pytest.raises(KeyError, removed.drop, drop_me)
@pytest.mark.parametrize("method,expected", [
('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])),
('union', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'),
(2, 'C')], dtype=[('num', int), ('let', 'a1')]))
])
def test_tuple_union_bug(self, method, expected):
index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')]))
index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')]))
result = getattr(index1, method)(index2)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("attr", [
'is_monotonic_increasing', 'is_monotonic_decreasing',
'_is_strictly_monotonic_increasing',
'_is_strictly_monotonic_decreasing'])
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_get_set_value(self):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
assert values[67] == 10
@pytest.mark.parametrize("values", [
['foo', 'bar', 'quux'], {'foo', 'bar', 'quux'}])
@pytest.mark.parametrize("index,expected", [
(Index(['qux', 'baz', 'foo', 'bar']),
np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)) # empty
])
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
# all nans are the same
if (isinstance(nulls_fixture, float) and
isinstance(nulls_fixture2, float) and
math.isnan(nulls_fixture) and
math.isnan(nulls_fixture2)):
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, True]))
else:
tm.assert_numpy_array_equal(Index(['a', nulls_fixture]).isin(
[nulls_fixture2]), np.array([False, False]))
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[np.nan]), np.array([False, True]))
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(Float64Index([1.0, nulls_fixture]).isin(
[pd.NaT]), np.array([False, False]))
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = 'foobar'
tm.assert_numpy_array_equal(expected,
index.isin(values, level='foobar'))
@pytest.mark.parametrize("level", [1, 10, -2])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_bad_index(self, level, index):
with pytest.raises(IndexError, match='Too many levels'):
index.isin([], level=level)
@pytest.mark.parametrize("level", [1.0, 'foobar', 'xyzzy', np.nan])
@pytest.mark.parametrize("index", [
Index(['qux', 'baz', 'foo', 'bar']),
Float64Index([1.0, 2.0, 3.0, 4.0])])
def test_isin_level_kwarg_raises_key(self, level, index):
with pytest.raises(KeyError, match='must be same as name'):
index.isin([], level=level)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("values", [
[1, 2, 3, 4],
[1., 2., 3., 4.],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range('2018-01-01', freq='D', periods=4)])
def test_boolean_cmp(self, values):
index = Index(values)
result = (index == values)
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("name,level", [
(None, 0), ('a', 'a')])
def test_get_level_values(self, name, level):
expected = self.strIndex.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(['a', 'b'], name='asdf')
assert index.name == index[1:].name
# instance attributes of the form self.<name>Index
@pytest.mark.parametrize('index_kind',
['unicode', 'str', 'date', 'int', 'float'])
def test_join_self(self, join_type, index_kind):
res = getattr(self, '{0}Index'.format(index_kind))
joined = res.join(res, how=join_type)
assert res is joined
@pytest.mark.parametrize("method", ['strip', 'rstrip', 'lstrip'])
def test_str_attribute(self, method):
# GH9068
index = Index([' jack', 'jill ', ' jesse ', 'frank'])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')])
def test_str_attribute_raises(self, index):
with pytest.raises(AttributeError, match='only use .str accessor'):
index.str.repeat(2)
@pytest.mark.parametrize("expand,expected", [
(None, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(False, Index([['a', 'b', 'c'], ['d', 'e'], ['f']])),
(True, MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)]))])
def test_str_split(self, expand, expected):
index = Index(['a b c', 'd e', 'f'])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(['a1', 'a2', 'b1', 'b2'])
result = index.str.startswith('a')
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(['a1', 'a2', 'b1', 'b2'])
s = Series(range(4), index=index)
result = s[s.index.str.startswith('a')]
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("index,expected", [
(Index(list('abcd')), True), (Index(range(4)), False)])
def test_tab_completion(self, index, expected):
# GH 9910
result = 'str' in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, 'a', 'b', 'c'])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how='outer')
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(
left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list('ABC'), name='xxx')
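# By default -1 is an ordinary positional index (take from the end);
# only when a fill_value is passed with allow_fill=True does -1 mean
# "missing".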
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list('ABC'), name='xxx')
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list('ABC'), name='xxx')
with pytest.raises(IndexError, match='out of bounds'):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, 'foobar'])
@pytest.mark.parametrize("labels", [
[], np.array([]), ['A', 'B', 'C'], ['C', 'B', 'A'],
np.array(['A', 'B', 'C']), np.array(['C', 'B', 'A']),
# Must preserve name even if dtype changes
pd.date_range('20130101', periods=3).values,
pd.date_range('20130101', periods=3).tolist()])
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name,
labels):
# GH6552
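# Index.reindex returns a (new_index, indexer) tuple; only the new
# index carries the name under test.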
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [
[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self,
labels):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize("labels,dtype", [
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64)])
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self,
labels,
dtype):
# GH7774
index = pd.Index(list('abc'))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list('abc'))
result = index.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize("mi,expected", [
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False]))])
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(['foo', 'bar', 'baz'])])
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
with pytest.raises(ValueError, match="Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(['foo', 'bar', 'baz'])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [
pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(['01:02:03', '01:02:04'], name='label')
assert index.name == dt_conv(index).name
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index(['a', 'bb', 'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# ASCII
# short
(pd.Index(['a', 'bb', 'ccc']),
u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""),
# multiple lines
(pd.Index(['a', 'bb', 'ccc'] * 10),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""),
# truncated
(pd.Index(['a', 'bb', 'ccc'] * 100),
u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""),
# Non-ASCII
# short
(pd.Index([u'あ', u'いい', u'ううう']),
u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_compat(self, index, expected):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.skipif(not PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = repr(index)
assert result == expected
@pytest.mark.skipif(PY3, reason="compat test")
@pytest.mark.parametrize("index,expected", [
# short
(pd.Index([u'あ', u'いい', u'ううう']),
(u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")),
# multiple lines
(pd.Index([u'あ', u'いい', u'ううう'] * 10),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")),
# truncated
(pd.Index([u'あ', u'いい', u'ううう'] * 100),
(u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)"))])
def test_string_index_repr_with_unicode_option_compat(self, index,
expected):
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
result = unicode(index) # noqa
assert result == expected
@pytest.mark.parametrize('dtype', [np.int64, np.float64])
@pytest.mark.parametrize('delta', [1, 0, -1])
def test_addsub_arithmetic(self, dtype, delta):
# GH 8142
delta = dtype(delta)
index = pd.Index([10, 11, 12], dtype=dtype)
result = index + delta
expected = pd.Index(index.values + delta, dtype=dtype)
tm.assert_index_equal(result, expected)
# this subtraction used to fail
result = index - delta
expected = pd.Index(index.values - delta, dtype=dtype)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index + index, 2 * index)
tm.assert_index_equal(index - index, 0 * index)
assert not (index - index).empty
def test_iadd_preserves_name(self):
# GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name
ser = pd.Series([1, 2, 3])
ser.index.name = 'foo'
ser.index += 1
assert ser.index.name == "foo"
ser.index -= 1
assert ser.index.name == "foo"
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
def test_get_duplicates_deprecated(self):
index = pd.Index([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
index.get_duplicates()
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('idx.', 4))
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are not comparable in py3
# (GH 13514)
_holder = Index
def setup_method(self, method):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
result = index.argsort()
elif PY3:
with pytest.raises(TypeError, match="unorderable types"):
result = index.argsort()
else:
result = index.argsort()
expected = np.array(index).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
index = self.create_index()
if PY36:
with pytest.raises(TypeError, match="'>|<' not supported"):
result = np.argsort(index)
elif PY3:
with pytest.raises(TypeError, match="unorderable types"):
result = np.argsort(index)
else:
result = np.argsort(index)
expected = index.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
warning_type = RuntimeWarning if PY3 else None
with tm.assert_produces_warning(warning_type):
# Python 3: Unorderable types
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name='MyName')
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name='NewName')
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == 'MyName'
assert index2.name == 'NewName'
index3 = index.copy(names=['NewName'])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == 'MyName'
assert index.names == ['MyName']
assert index3.name == 'NewName'
assert index3.names == ['NewName']
def test_union_base(self):
index = self.create_index()
first = index[3:]
second = index[:5]
if PY3:
# unorderable types
warn_type = RuntimeWarning
else:
warn_type = None
with tm.assert_produces_warning(warn_type):
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_union_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[3:]
second = index[:5]
if PY3:
# unorderable types
warn_type = RuntimeWarning
else:
warn_type = None
with tm.assert_produces_warning(warn_type):
result = first.union(klass(second.values))
assert tm.equalContents(result, index)
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [
np.array, Series, list])
def test_intersection_different_type_base(self, klass):
# GH 10149
index = self.create_index()
first = index[:5]
second = index[:3]
result = first.intersection(klass(second.values))
assert tm.equalContents(result, second)
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
index = self.create_index()
first = index[:4]
second = index[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("dtype", [
None, object, 'category'])
@pytest.mark.parametrize("vals,expected", [
([1, 2, 3], [1, 2, 3]), ([1., 2., 3.], [1., 2., 3.]),
([1., 2., np.nan, 3.], [1., 2., 3.]),
(['A', 'B', 'C'], ['A', 'B', 'C']),
(['A', np.nan, 'B', 'C'], ['A', 'B', 'C'])])
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ['any', 'all'])
@pytest.mark.parametrize("index,expected", [
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03', pd.NaT]),
pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])),
(pd.TimedeltaIndex(['1 days', '2 days', '3 days']),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]),
pd.TimedeltaIndex(['1 days', '2 days', '3 days'])),
(pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')),
(pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M'),
pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M'))])
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", [
pd.Index([np.nan]), pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT'])])
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
def test_repr_summary(self):
with cf.option_context('display.max_seq_items', 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(['a', 'b', 'c'], name=0)
result = klass(lrange(3), index=index)
assert '0' in repr(result)
def test_print_unicode_columns(self):
df = pd.DataFrame({u("\u05d0"): [1, 2, 3],
"\u05d1": [4, 5, 6],
"c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
@pytest.mark.parametrize("func,compat_func", [
(str, text_type), # unicode string
(bytes, str) # byte string
])
def test_with_unicode(self, func, compat_func):
index = Index(lrange(1000))
if PY3:
func(index)
else:
compat_func(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(['aa'], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.gt, operator.ge,
operator.lt, operator.le])
def test_comparison_tzawareness_compat(self, op):
# GH#18162
dr = pd.date_range('2016-01-01', periods=6)
dz = dr.tz_localize('US/Pacific')
# Check that aware-aware and naive-naive comparisons do not raise;
# mixed tz-aware and tz-naive comparisons must raise TypeError
naive_series = Series(dr)
aware_series = Series(dz)
with pytest.raises(TypeError):
op(dz, naive_series)
with pytest.raises(TypeError):
op(dr, aware_series)
# TODO: implement _assert_tzawareness_compat for the reverse
# comparison with the Series on the left-hand side
class TestIndexUtils(object):
@pytest.mark.parametrize('data, names, expected', [
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')),
([['a', 'a'], ['c', 'd']], None,
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])),
([['a', 'a'], ['c', 'd']], ['L1', 'L2'],
MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]],
names=['L1', 'L2'])),
])
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt',
'add', 'radd', 'sub', 'rsub',
'mul', 'rmul', 'truediv', 'rtruediv',
'floordiv', 'rfloordiv',
'pow', 'rpow', 'mod', 'divmod'])
def test_generated_op_names(opname, indices):
index = indices
if isinstance(index, ABCIndex) and opname == 'rsub':
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = '__{name}__'.format(name=opname)
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize('index_maker', tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with pytest.raises(TypeError, match='unexpected keyword argument'):
index_maker(foo='bar')
def test_deprecated_fastpath():
with tm.assert_produces_warning(FutureWarning):
idx = pd.Index(
np.array(['a', 'b'], dtype=object), name='test', fastpath=True)
expected = pd.Index(['a', 'b'], name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.Int64Index(
np.array([1, 2, 3], dtype='int64'), name='test', fastpath=True)
expected = pd.Index([1, 2, 3], name='test', dtype='int64')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.RangeIndex(0, 5, 2, name='test', fastpath=True)
expected = pd.RangeIndex(0, 5, 2, name='test')
tm.assert_index_equal(idx, expected)
with tm.assert_produces_warning(FutureWarning):
idx = pd.CategoricalIndex(['a', 'b', 'c'], name='test', fastpath=True)
expected = pd.CategoricalIndex(['a', 'b', 'c'], name='test')
tm.assert_index_equal(idx, expected)
| bsd-3-clause |
pakodekker/oceansar | oceansar/radarsim/skim_raw.py | 1 | 35022 | #!/usr/bin/env python
""" ========================================
sar_raw: SAR Raw data Generator (:mod:`srg`)
========================================
Script to compute SAR Raw data from an ocean surface.
**Arguments**
* -c, --config_file: Configuration file
* -o, --output_file: Output file
* -oc, --ocean_file: Ocean output file
* [-ro, --reuse_ocean_file]: Reuse ocean file if it exists
* [-er, --errors_file]: Errors file (only if system errors model is activated)
* [-re, --reuse_errors_file]: Reuse errors file if it exists
.. note::
This script MUST be run using MPI!
Example (4 cores machine)::
mpiexec -np 4 python skim_raw.py -c config.par -o raw_data.nc -oc ocean_state.nc -ro -er errors_file.nc -re
"""
from mpi4py import MPI
import os
import time
import argparse
import numpy as np
from matplotlib import pyplot as plt
from scipy import linalg
import numexpr as ne
import datetime
from oceansar import utils
from oceansar import ocs_io as tpio
from oceansar.utils import geometry as geosar
from oceansar.radarsim.antenna import sinc_1tx_nrx
from oceansar import constants as const
from oceansar import nrcs as rcs
from oceansar import closure
from oceansar.radarsim import range_profile as raw
from oceansar.surfaces import OceanSurface, OceanSurfaceBalancer
from oceansar.swell_spec import dir_swell_spec as s_spec
def upsample_and_dopplerize(ssraw, dop, n_up, prf):
"""
:param ssraw: sub-sampled raw data (slow time x sub-band x range)
:param dop: Geometric Doppler per sub-band and range bin
:param n_up: Upsampling factor
:param prf: PRF
:return:
"""
# FIXME
# We should add global (mean) range cell migration here
# The azimuth dependent part is handled in the main loop of the code
# The range dependent part of the rcm is also not added.
dims = ssraw.shape
print(n_up)
out = np.zeros((dims[0] * int(n_up), dims[2]), dtype=np.complex64)
# tmp = np.zeros([raw.shape[0] * int(n_up), raw.shape[2]], dtype=np.complex)
t = (np.arange(dims[0] * int(n_up)) / prf).reshape((dims[0] * int(n_up), 1))
t2pi = t * (np.pi * 2)
for ind in range(dims[1]):
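# Upsample this sub-band in slow time by zero-padding its spectrum:
# take the FFT, keep the original dims[0] bins on a grid that is n_up
# times longer, recentre with the roll, and invert. The
# conj(fft(conj(x))) / N construction below is simply an inverse FFT
# expressed with a forward transform.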
raw_zp = np.zeros((dims[0] * int(n_up), dims[2]), dtype=np.complex64)
raw_zp[0:dims[0]] = np.fft.fftshift(np.fft.fft(ssraw[:, ind, :], axis=0), axes=(0,))
raw_zp = np.roll(raw_zp, int(-dims[0]/2), axis=0)
raw_zp = np.conj(np.fft.fft(np.conj(raw_zp), axis=0)) / dims[0]
dop_phase = t2pi * (dop[ind]).reshape((1, dims[2]))
out = out + raw_zp * np.exp(1j * dop_phase)
# out = np.zeros([ssraw.shape[0], int(n_up), ssraw.shape[2]], dtype=np.complex64)
# out = out + np.sum(ssraw, axis=1).reshape((dims[0], 1, dims[2]))
# out = out.reshape((ssraw.shape[0] * int(n_up), ssraw.shape[2]))
return out
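# A minimal usage sketch (hypothetical shapes, for illustration only):
# ssraw = np.zeros((16, 4, 128), dtype=np.complex64) # pulses x sub-bands x range
# dop = np.zeros((4, 128)) # geometric Doppler per sub-band and range bin
# full = upsample_and_dopplerize(ssraw, dop, n_up=4, prf=200.0)
# # full.shape == (64, 128): sub-bands summed, slow time upsampled 4x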
def skimraw(cfg_file, output_file, ocean_file, reuse_ocean_file, errors_file, reuse_errors_file,
plot_save=True):
###################
# INITIALIZATIONS #
###################
# MPI SETUP
comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
# WELCOME
if rank == 0:
print('-------------------------------------------------------------------')
print(time.strftime("- OCEANSAR SKIM RAW GENERATOR: %Y-%m-%d %H:%M:%S", time.localtime()))
# print('- Copyright (c) Gerard Marull Paretas, Paco Lopez Dekker')
print('-------------------------------------------------------------------')
# CONFIGURATION FILE
# Note: variables are 'copied' to reduce code verbosity
cfg = tpio.ConfigFile(cfg_file)
info = utils.PrInfo(cfg.sim.verbosity, "SKIM raw")
# RAW
wh_tol = cfg.srg.wh_tol
nesz = cfg.srg.nesz
use_hmtf = cfg.srg.use_hmtf
scat_spec_enable = cfg.srg.scat_spec_enable
scat_spec_mode = cfg.srg.scat_spec_mode
scat_bragg_enable = cfg.srg.scat_bragg_enable
scat_bragg_model = cfg.srg.scat_bragg_model
scat_bragg_d = cfg.srg.scat_bragg_d
scat_bragg_spec = cfg.srg.scat_bragg_spec
scat_bragg_spread = cfg.srg.scat_bragg_spread
# SAR
inc_angle = np.deg2rad(cfg.radar.inc_angle)
f0 = cfg.radar.f0
pol = cfg.radar.pol
squint_r = np.radians(90 - cfg.radar.azimuth)
if pol == 'DP':
do_hh = True
do_vv = True
elif pol == 'hh':
do_hh = True
do_vv = False
else:
do_hh = False
do_vv = True
prf = cfg.radar.prf
num_ch = int(cfg.radar.num_ch)
ant_L = cfg.radar.ant_L
alt = cfg.radar.alt
v_ground = cfg.radar.v_ground
rg_bw = cfg.radar.rg_bw
over_fs = cfg.radar.Fs / cfg.radar.rg_bw
sigma_n_tx = cfg.radar.sigma_n_tx
phase_n_tx = np.deg2rad(cfg.radar.phase_n_tx)
sigma_beta_tx = cfg.radar.sigma_beta_tx
phase_beta_tx = np.deg2rad(cfg.radar.phase_beta_tx)
sigma_n_rx = cfg.radar.sigma_n_rx
phase_n_rx = np.deg2rad(cfg.radar.phase_n_rx)
sigma_beta_rx = cfg.radar.sigma_beta_rx
phase_beta_rx = np.deg2rad(cfg.radar.phase_beta_rx)
# OCEAN / OTHERS
ocean_dt = cfg.ocean.dt
if hasattr(cfg.sim, "cal_targets"):
if cfg.sim.cal_targets is False:
add_point_target = False # This for debugging
point_target_floats = True # Not really needed, but makes coding easier later
else:
print("Adding cal targets")
add_point_target = True
if cfg.sim.cal_targets.lower() == 'floating':
point_target_floats = True
else:
point_target_floats = False
else:
add_point_target = False # This for debugging
point_target_floats = True
n_sinc_samples = 10
sinc_ovs = 20
chan_sinc_vec = raw.calc_sinc_vec(n_sinc_samples, sinc_ovs, Fs=over_fs)
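# Pre-computed oversampled sinc interpolation kernel: each scatterer's
# return is later placed on the range grid with sub-sample accuracy
# (n_sinc_samples taps, oversampled sinc_ovs times)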
# Set wind direction with respect to the beam
# FIXME: verify that the sign convention of the following line is correct
wind_dir = cfg.radar.azimuth - cfg.ocean.wind_dir
# OCEAN SURFACE
if rank == 0:
print('Initializing ocean surface...')
surface_full = OceanSurface()
# Setup compute values
compute = ['D', 'Diff', 'Diff2']
if use_hmtf:
compute.append('hMTF')
# Try to reuse initialized surface
if reuse_ocean_file:
try:
surface_full.load(ocean_file, compute)
except RuntimeError:
pass
if (not reuse_ocean_file) or (not surface_full.initialized):
if hasattr(cfg.ocean, 'use_buoy_data'):
if cfg.ocean.use_buoy_data:
bdataf = cfg.ocean.buoy_data_file
date = datetime.datetime(np.int(cfg.ocean.year),
np.int(cfg.ocean.month),
np.int(cfg.ocean.day),
np.int(cfg.ocean.hour),
np.int(cfg.ocean.minute), 0)
date, bdata = tpio.load_buoydata(bdataf, date)
# FIX-ME: direction needs to consider also azimuth of beam
buoy_spec = tpio.BuoySpectra(bdata, heading=cfg.radar.heading, depth=cfg.ocean.depth)
dirspectrum_func = buoy_spec.Sk2
# Since the wind direction is included in the buoy data
wind_dir = 0
else:
dirspectrum_func = None
if cfg.ocean.swell_dir_enable:
dir_swell_spec = s_spec.ardhuin_swell_spec
else:
dir_swell_spec = None
wind_dir = np.deg2rad(wind_dir)
else:
if cfg.ocean.swell_dir_enable:
dir_swell_spec = s_spec.ardhuin_swell_spec
else:
dir_swell_spec = None
dirspectrum_func = None
wind_dir = np.deg2rad(wind_dir)
surface_full.init(cfg.ocean.Lx, cfg.ocean.Ly, cfg.ocean.dx,
cfg.ocean.dy, cfg.ocean.cutoff_wl,
cfg.ocean.spec_model, cfg.ocean.spread_model,
wind_dir,
cfg.ocean.wind_fetch, cfg.ocean.wind_U,
cfg.ocean.current_mag,
np.deg2rad(cfg.radar.azimuth - cfg.ocean.current_dir),
cfg.radar.azimuth - cfg.ocean.dir_swell_dir,
cfg.ocean.freq_r, cfg.ocean.sigf,
cfg.ocean.sigs, cfg.ocean.Hs,
cfg.ocean.swell_dir_enable,
cfg.ocean.swell_enable, cfg.ocean.swell_ampl,
np.deg2rad(cfg.radar.azimuth - cfg.ocean.swell_dir),
cfg.ocean.swell_wl,
compute, cfg.ocean.opt_res,
cfg.ocean.fft_max_prime,
choppy_enable=cfg.ocean.choppy_enable,
depth=cfg.ocean.depth,
dirspectrum_func=dirspectrum_func,
dir_swell_spec=dir_swell_spec)
surface_full.save(ocean_file)
# Now we plot the directional spectrum
# self.wave_dirspec[good_k] = dirspectrum_func(self.kx[good_k], self.ky[good_k])
plt.figure()
plt.imshow(np.fft.fftshift(surface_full.wave_dirspec),
extent=[surface_full.kx.min(), surface_full.kx.max(),
surface_full.ky.min(), surface_full.ky.max()],
origin='lower',
cmap='inferno_r')
plt.grid(True)
pltax = plt.gca()
pltax.set_xlim((-1, 1))
pltax.set_ylim((-1, 1))
Narr_length = 0.08 # np.min([surface_full.kx.max(), surface_full.ky.max()])
pltax.arrow(0, 0,
-Narr_length * np.sin(np.radians(cfg.radar.heading)),
Narr_length * np.cos(np.radians(cfg.radar.heading)),
fc="k", ec="k")
plt.xlabel('$k_x$ [rad/m]')
plt.ylabel('$k_y$ [rad/m]')
plt.colorbar()
#plt.show()
# Create plots directory
plot_path = os.path.dirname(output_file) + os.sep + 'raw_plots'
if plot_save:
if not os.path.exists(plot_path):
os.makedirs(plot_path)
plt.savefig(os.path.join(plot_path, 'input_dirspectrum.png'))
plt.close()
if cfg.ocean.swell_dir_enable:
plt.figure()
plt.imshow(np.fft.fftshift(np.abs(surface_full.swell_dirspec)),
extent=[surface_full.kx.min(), surface_full.kx.max(),
surface_full.ky.min(), surface_full.ky.max()],
origin='lower',
cmap='inferno_r')
plt.grid(True)
pltax = plt.gca()
pltax.set_xlim((-0.1, 0.1))
pltax.set_ylim((-0.1, 0.1))
Narr_length = 0.08 # np.min([surface_full.kx.max(), surface_full.ky.max()])
pltax.arrow(0, 0,
-Narr_length * np.sin(np.radians(cfg.radar.heading)),
Narr_length * np.cos(np.radians(cfg.radar.heading)),
fc="k", ec="k")
plt.xlabel('$k_x$ [rad/m]')
plt.ylabel('$k_y$ [rad/m]')
plt.colorbar()
#plt.show()
# Create plots directory
plot_path = os.path.dirname(output_file) + os.sep + 'raw_plots'
if plot_save:
if not os.path.exists(plot_path):
os.makedirs(plot_path)
plt.savefig(os.path.join(plot_path, 'input_dirspectrum_combined.png'))
plt.close()
else:
surface_full = None
# Initialize surface balancer
surface = OceanSurfaceBalancer(surface_full, ocean_dt)
# CALCULATE PARAMETERS
if rank == 0:
print('Initializing simulation parameters...')
# SR/GR/INC Matrixes
sr0 = geosar.inc_to_sr(inc_angle, alt)
gr0 = geosar.inc_to_gr(inc_angle, alt)
gr = surface.x + gr0
sr, inc, _ = geosar.gr_to_geo(gr, alt)
print(sr.dtype)
look = geosar.inc_to_look(inc, alt)
min_sr = np.min(sr)
# sr -= np.min(sr)
#inc = np.repeat(inc[np.newaxis, :], surface.Ny, axis=0)
#sr = np.repeat(sr[np.newaxis, :], surface.Ny, axis=0)
#gr = np.repeat(gr[np.newaxis, :], surface.Ny, axis=0)
#Let's try to safe some memory and some operations
inc = inc.reshape(1, inc.size)
look = look.reshape(1, inc.size)
sr = sr.reshape(1, sr.size)
gr = gr.reshape(1, gr.size)
sin_inc = np.sin(inc)
cos_inc = np.cos(inc)
# lambda, K, resolution, time, etc.
l0 = const.c/f0
k0 = 2.*np.pi*f0/const.c
sr_res = const.c/(2.*rg_bw)
sr_smp = const.c / (2 * cfg.radar.Fs)
if cfg.radar.L_total:
ant_L = ant_L/np.float(num_ch)
if v_ground == 'auto':
v_ground = geosar.orbit_to_vel(alt, ground=True)
v_orb = geosar.orbit_to_vel(alt, ground=False)
else:
v_orb = v_ground
t_step = 1./prf
az_steps = int(cfg.radar.n_pulses)
t_span = az_steps / prf
rg_samp = np.int(utils.optimize_fftsize(cfg.radar.n_rg))
#min_sr = np.mean(sr) - rg_samp / 2 * sr_smp
print(sr_smp)
max_sr = np.mean(sr) + rg_samp / 2 * sr_smp
sr_prof = (np.arange(rg_samp) - rg_samp/2) * sr_smp + np.mean(sr)
gr_prof, inc_prof, look_prof, b_prof = geosar.sr_to_geo(sr_prof, alt)
look_prof = look_prof.reshape((1, look_prof.size))
sr_prof = sr_prof.reshape((1, look_prof.size))
dop_ref = 2 * v_orb * np.sin(look_prof) * np.sin(squint_r) / l0
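# Reference Doppler centroid from the usual monostatic relation
# f_dc = 2 * v * sin(look) * sin(squint) / lambda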
print("skim_raw: Doppler Centroid is %f Hz" % (np.mean(dop_ref)))
if cfg.srg.two_scale_Doppler:
# We will compute less surface realizations
n_pulses_b = utils.optimize_fftsize(int(cfg.srg.surface_coh_time * prf))/2
print("skim_raw: down-sampling rate =%i" % (n_pulses_b))
# n_pulses_b = 4
az_steps_ = int(np.ceil(az_steps / n_pulses_b))
t_step = t_step * n_pulses_b
# Maximum length in azimuth that we can consider to have the same geometric Doppler
dy_integ = cfg.srg.phase_err_tol / (2 * k0 * v_ground / min_sr * cfg.srg.surface_coh_time)
surface_dy = surface.y[1] - surface.y[0]
ny_integ = (dy_integ / surface.dy)
print("skim_raw: ny_integ=%f" % (ny_integ))
if ny_integ < 1:
ny_integ = 1
else:
ny_integ = int(2**np.floor(np.log2(ny_integ)))
info.msg("skim_raw: size of intermediate radar data: %f MB" % (8 * ny_integ * az_steps_ * rg_samp *1e-6),
importance=1)
info.msg("skim_raw: ny_integ=%i" % (ny_integ), importance=1)
if do_hh:
proc_raw_hh = np.zeros([az_steps_, int(surface.Ny / ny_integ), rg_samp], dtype=np.complex)
proc_raw_hh_step = np.zeros([surface.Ny, rg_samp], dtype=np.complex)
if do_vv:
proc_raw_vv = np.zeros([az_steps_, int(surface.Ny / ny_integ), rg_samp], dtype=np.complex)
proc_raw_vv_step = np.zeros([surface.Ny, rg_samp], dtype=np.complex)
# Doppler centroid
# sin(a+da) = sin(a) + cos(a)*da - 1/2*sin(a)*da**2
az = surface.y.reshape((surface.Ny, 1))
# FIX-ME: this is a coarse approximation
da = az/gr_prof
sin_az = np.sin(squint_r) + np.cos(squint_r) * da - 0.5 * np.sin(squint_r) * da**2
dop0 = 2 * v_orb * np.sin(look_prof) * sin_az / l0
# az / 2 * sin_az
# az_now = (t_now - t_span / 2.) * v_ground * np.cos(squint_r)
# az = np.repeat((surface.y - az_now)[:, np.newaxis], surface.Nx, axis=1)
# az = (surface.y - az_now).reshape((surface.Ny, 1))
# print("Max az: %f" % (np.max(az)))
#dop0 = np.mean(np.reshape(dop0, (surface.Ny/ny_integ, ny_integ, rg_samp)), axis=1)
s_int = np.int(surface.Ny / ny_integ)
dop0 = np.mean(np.reshape(dop0, (s_int, np.int(ny_integ), rg_samp)), axis=1)
else:
az_steps_ = az_steps
if do_hh:
proc_raw_hh = np.zeros([az_steps, rg_samp], dtype=np.complex)
if do_vv:
proc_raw_vv = np.zeros([az_steps, rg_samp], dtype=np.complex)
t_last_rcs_bragg = -1.
last_progress = -1
NRCS_avg_vv = np.zeros(az_steps, dtype=np.float)
NRCS_avg_hh = np.zeros(az_steps, dtype=np.float)
## RCS MODELS
# Specular
if scat_spec_enable:
if scat_spec_mode == 'kodis':
rcs_spec = rcs.RCSKodis(inc, k0, surface.dx, surface.dy)
elif scat_spec_mode == 'fa' or scat_spec_mode == 'spa':
spec_ph0 = np.random.uniform(0., 2.*np.pi,
size=[surface.Ny, surface.Nx])
rcs_spec = rcs.RCSKA(scat_spec_mode, k0, surface.x, surface.y,
surface.dx, surface.dy)
else:
raise NotImplementedError('RCS mode %s for specular scattering not implemented' % scat_spec_mode)
# Bragg
if scat_bragg_enable:
phase_bragg = np.zeros([2, surface.Ny, surface.Nx])
bragg_scats = np.zeros([2, surface.Ny, surface.Nx], dtype=np.complex)
# dop_phase_p = np.random.uniform(0., 2.*np.pi, size=[surface.Ny, surface.Nx])
# dop_phase_m = np.random.uniform(0., 2.*np.pi, size=[surface.Ny, surface.Nx])
tau_c = closure.grid_coherence(cfg.ocean.wind_U,surface.dx, f0)
rndscat_p = closure.randomscat_ts(tau_c, (surface.Ny, surface.Nx), prf)
rndscat_m = closure.randomscat_ts(tau_c, (surface.Ny, surface.Nx), prf)
# NOTE: This ignores slope, may be changed
k_b = 2.*k0*sin_inc
c_b = sin_inc*np.sqrt(const.g/k_b + 0.072e-3*k_b)
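# c_b is the radial projection of the Bragg phase speed, from the
# deep-water gravity-capillary dispersion relation
# c(k) = sqrt(g / k + (T / rho) * k), with T/rho = 0.072e-3 m^3/s^2
# (surface tension over density of water)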
if scat_bragg_model == 'romeiser97':
current_dir = np.deg2rad(cfg.ocean.current_dir)
current_vec = (cfg.ocean.current_mag *
np.array([np.cos(current_dir),
np.sin(current_dir)]))
U_dir = np.deg2rad(cfg.ocean.wind_dir)
U_vec = (cfg.ocean.wind_U *
np.array([np.cos(U_dir), np.sin(U_dir)]))
U_eff_vec = U_vec - current_vec
rcs_bragg = rcs.RCSRomeiser97(k0, inc, pol,
surface.dx, surface.dy,
linalg.norm(U_eff_vec),
np.arctan2(U_eff_vec[1],
U_eff_vec[0]),
surface.wind_fetch,
scat_bragg_spec, scat_bragg_spread,
scat_bragg_d)
else:
raise NotImplementedError('RCS model %s for Bragg scattering not implemented' % scat_bragg_model)
surface_area = surface.dx * surface.dy * surface.Nx * surface.Ny
###################
# SIMULATION LOOP #
###################
if rank == 0:
print('Computing profiles...')
for az_step in np.arange(az_steps_, dtype=np.int):
# AZIMUTH & SURFACE UPDATE
t_now = az_step * t_step
az_now = (t_now - t_span/2.)*v_ground * np.cos(squint_r)
# az = np.repeat((surface.y - az_now)[:, np.newaxis], surface.Nx, axis=1)
az = (surface.y - az_now).reshape((surface.Ny, 1))
surface.t = t_now
if az_step == 0:
# Check wave-height
info.msg("Standard deviation of wave-height (peak-to-peak; i.e. x2): %f" % (2 * np.std(surface.Dz)))
#if az_step == 0:
# print("Max Dx: %f" % (np.max(surface.Dx)))
# print("Max Dy: %f" % (np.max(surface.Dy)))
# print("Max Dz: %f" % (np.max(surface.Dz)))
# print("Max Diffx: %f" % (np.max(surface.Diffx)))
# print("Max Diffy: %f" % (np.max(surface.Diffy)))
# print("Max Diffxx: %f" % (np.max(surface.Diffxx)))
# print("Max Diffyy: %f" % (np.max(surface.Diffyy)))
# print("Max Diffxy: %f" % (np.max(surface.Diffxy)))
# COMPUTE RCS FOR EACH MODEL
# Note: SAR processing is range independent as slant range is fixed
sin_az = az / sr
az_proj_angle = np.arcsin(az / gr0)
# Note: Projected displacements are added to slant range
if point_target_floats is False: # This can only happen if point targets are enabled
surface.Dx[int(surface.Ny / 2), int(surface.Nx / 2)] = 0
surface.Dy[int(surface.Ny / 2), int(surface.Nx / 2)] = 0
surface.Dz[int(surface.Ny / 2), int(surface.Nx / 2)] = 0
if cfg.srg.two_scale_Doppler:
# slant-range for phase
sr_surface = (sr - cos_inc * surface.Dz
+ surface.Dx * sin_inc + surface.Dy * sin_az)
if cfg.srg.rcm:
# add non common rcm
sr_surface4rcm = sr_surface + az / 2 * sin_az
else:
sr_surface4rcm = sr_surface
else:
# FIXME: check if global shift is included, in case we care about slow simulations
# slant-range for phase and Doppler
sr_surface = (sr - cos_inc*surface.Dz + az/2*sin_az
+ surface.Dx*sin_inc + surface.Dy*sin_az)
sr_surface4rcm = sr_surface
if do_hh:
scene_hh = np.zeros([int(surface.Ny), int(surface.Nx)], dtype=np.complex)
if do_vv:
scene_vv = np.zeros([int(surface.Ny), int(surface.Nx)], dtype=np.complex)
# Specular
if scat_spec_enable:
if scat_spec_mode == 'kodis':
Esn_sp = np.sqrt(4.*np.pi)*rcs_spec.field(az_proj_angle, sr_surface,
surface.Diffx, surface.Diffy,
surface.Diffxx, surface.Diffyy, surface.Diffxy)
if do_hh:
scene_hh += Esn_sp
if do_vv:
scene_vv += Esn_sp
else:
# FIXME
if do_hh:
pol_tmp = 'hh'
Esn_sp = (np.exp(-1j*(2.*k0*sr_surface)) * (4.*np.pi)**1.5 *
rcs_spec.field(1, 1, pol_tmp[0], pol_tmp[1],
inc, inc,
az_proj_angle, az_proj_angle + np.pi,
surface.Dz,
surface.Diffx, surface.Diffy,
surface.Diffxx,
surface.Diffyy,
surface.Diffxy))
scene_hh += Esn_sp
if do_vv:
pol_tmp = 'vv'
Esn_sp = (np.exp(-1j*(2.*k0*sr_surface)) * (4.*np.pi)**1.5 *
rcs_spec.field(1, 1, pol_tmp[0], pol_tmp[1],
inc, inc,
az_proj_angle, az_proj_angle + np.pi,
surface.Dz,
surface.Diffx, surface.Diffy,
surface.Diffxx,
surface.Diffyy,
surface.Diffxy))
scene_vv += Esn_sp
NRCS_avg_hh[az_step] += (np.sum(np.abs(Esn_sp)**2) / surface_area)
NRCS_avg_vv[az_step] += NRCS_avg_hh[az_step]
# Bragg
if scat_bragg_enable:
if (t_now - t_last_rcs_bragg) > ocean_dt:
if scat_bragg_model == 'romeiser97':
if pol == 'DP':
RCS_bragg_hh, RCS_bragg_vv = rcs_bragg.rcs(az_proj_angle,
surface.Diffx,
surface.Diffy)
elif pol=='hh':
RCS_bragg_hh = rcs_bragg.rcs(az_proj_angle,
surface.Diffx,
surface.Diffy)
else:
RCS_bragg_vv = rcs_bragg.rcs(az_proj_angle,
surface.Diffx,
surface.Diffy)
if use_hmtf:
# Fix Bad MTF points
surface.hMTF[np.where(surface.hMTF < -1)] = -1
if do_hh:
RCS_bragg_hh[0] *= (1 + surface.hMTF)
RCS_bragg_hh[1] *= (1 + surface.hMTF)
if do_vv:
RCS_bragg_vv[0] *= (1 + surface.hMTF)
RCS_bragg_vv[1] *= (1 + surface.hMTF)
t_last_rcs_bragg = t_now
if do_hh:
scat_bragg_hh = np.sqrt(RCS_bragg_hh)
NRCS_bragg_hh_instant_avg = np.sum(RCS_bragg_hh) / surface_area
NRCS_avg_hh[az_step] += NRCS_bragg_hh_instant_avg
if do_vv:
scat_bragg_vv = np.sqrt(RCS_bragg_vv)
NRCS_bragg_vv_instant_avg = np.sum(RCS_bragg_vv) / surface_area
NRCS_avg_vv[az_step] += NRCS_bragg_vv_instant_avg
# Doppler phases (Note: Bragg radial velocity taken constant!)
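# surf_phase is the round-trip propagation phase -2 * k0 * r of each
# facet; cap_phase advances linearly in slow time at the Bragg phase
# speed c_b, so the two components below model Bragg waves travelling
# toward and away from the radar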
surf_phase = - (2 * k0) * sr_surface
cap_phase = (2 * k0) * t_step * c_b * (az_step + 1)
phase_bragg[0] = surf_phase - cap_phase # + dop_phase_p
phase_bragg[1] = surf_phase + cap_phase # + dop_phase_m
bragg_scats[0] = rndscat_m.scats(t_now)
bragg_scats[1] = rndscat_p.scats(t_now)
if do_hh:
scene_hh += ne.evaluate('sum(scat_bragg_hh * exp(1j*phase_bragg) * bragg_scats, axis=0)')
if do_vv:
scene_vv += ne.evaluate('sum(scat_bragg_vv * exp(1j*phase_bragg) * bragg_scats, axis=0)')
if add_point_target:
# Now we replace scattering at center by fixed value
pt_y = int(surface.Ny / 2)
pt_x = int(surface.Nx / 2)
if do_hh:
scene_hh[pt_y, pt_x] = 1000 * np.exp(-1j * 2 * k0 * sr_surface[pt_y, pt_x])
if do_vv:
scene_vv[pt_y, pt_x] = 1000 * np.exp(-1j * 2 * k0 * sr_surface[pt_y, pt_x])
## ANTENNA PATTERN
## FIXME: this assumes co-located Tx and Rx, so it will not work for true bistatic configurations
if cfg.radar.L_total:
beam_pattern = sinc_1tx_nrx(sin_az, ant_L * num_ch, f0, num_ch, field=True)
else:
beam_pattern = sinc_1tx_nrx(sin_az, ant_L, f0, 1, field=True)
# GENERATE CHANNEL PROFILES
if cfg.srg.two_scale_Doppler:
sr_surface_ = sr_surface4rcm
if do_hh:
proc_raw_hh_step[:, :] = 0
proc_raw_hh_ = proc_raw_hh_step
scene_bp_hh = scene_hh * beam_pattern
if do_vv:
proc_raw_vv_step[:, :] = 0
proc_raw_vv_ = proc_raw_vv_step
scene_bp_vv = scene_vv * beam_pattern
else:
sr_surface_ = sr_surface4rcm.flatten()
if do_hh:
proc_raw_hh_ = proc_raw_hh[az_step]
scene_bp_hh = (scene_hh * beam_pattern).flatten()
if do_vv:
proc_raw_vv_ = proc_raw_vv[az_step]
scene_bp_vv = (scene_vv * beam_pattern).flatten()
if do_hh:
raw.chan_profile_numba(sr_surface_,
scene_bp_hh,
sr_smp,
sr_prof.min(),
chan_sinc_vec,
n_sinc_samples, sinc_ovs,
proc_raw_hh_,
rg_only=cfg.srg.two_scale_Doppler)
if do_vv:
raw.chan_profile_numba(sr_surface_,
scene_bp_vv,
sr_smp,
sr_prof.min(),
chan_sinc_vec,
n_sinc_samples, sinc_ovs,
proc_raw_vv_,
rg_only=cfg.srg.two_scale_Doppler)
if cfg.srg.two_scale_Doppler:
#Integrate in azimuth
s_int = np.int(surface.Ny/ny_integ)
if do_hh:
proc_raw_hh[az_step] = np.sum(np.reshape(proc_raw_hh_,
(s_int, ny_integ, rg_samp)), axis=1)
info.msg("Max abs(HH): %f" % np.max(np.abs(proc_raw_hh[az_step])), importance=1)
if do_vv:
#print(proc_raw_vv.shape)
proc_raw_vv[az_step] = np.sum(np.reshape(proc_raw_vv_,
(s_int, ny_integ, rg_samp)), axis=1)
info.msg("Max abs(VV): %f" % np.max(np.abs(proc_raw_vv[az_step])), importance=1)
# SHOW PROGRESS (%)
current_progress = np.int((100*az_step)/az_steps_)
if current_progress != last_progress:
last_progress = current_progress
info.msg('SP,%d,%d,%d%%' % (rank, size, current_progress), importance=1)
if cfg.srg.two_scale_Doppler:
# Now we have to up-sample and add Doppler
info.msg("skim_raw: Dopplerizing and upsampling")
print(dop0.max())
print(n_pulses_b)
print(prf)
if do_hh:
proc_raw_hh = upsample_and_dopplerize(proc_raw_hh, dop0, n_pulses_b, prf)
if do_vv:
proc_raw_vv = upsample_and_dopplerize(proc_raw_vv, dop0, n_pulses_b, prf)
# MERGE RESULTS
if do_hh:
total_raw_hh = np.empty_like(proc_raw_hh) if rank == 0 else None
comm.Reduce(proc_raw_hh, total_raw_hh, op=MPI.SUM, root=0)
if do_vv:
total_raw_vv = np.empty_like(proc_raw_vv) if rank == 0 else None
comm.Reduce(proc_raw_vv, total_raw_vv, op=MPI.SUM, root=0)
## PROCESS REDUCED RAW DATA & SAVE (ROOT)
if rank == 0:
info.msg('calibrating and saving results...')
# Filter and decimate
#range_filter = np.ones_like(total_raw)
#range_filter[:, :, rg_samp/(2*2*cfg.radar.over_fs):-rg_samp/(2*2*cfg.radar.over_fs)] = 0
#total_raw = np.fft.ifft(range_filter*np.fft.fft(total_raw))
if do_hh:
total_raw_hh = total_raw_hh[:, :cfg.radar.n_rg]
if do_vv:
total_raw_vv = total_raw_vv[:, :cfg.radar.n_rg]
# Calibration factor (projected antenna pattern integrated in azimuth)
az_axis = np.arange(-t_span/2.*v_ground, t_span/2.*v_ground, sr0*const.c/(np.pi*f0*ant_L*10.))
if cfg.radar.L_total:
pattern = sinc_1tx_nrx(az_axis/sr0, ant_L * num_ch, f0,
num_ch, field=True)
else:
pattern = sinc_1tx_nrx(az_axis/sr0, ant_L, f0, 1,
field=True)
cal_factor = (1. / np.sqrt(np.trapz(np.abs(pattern)**2., az_axis) *
sr_res/np.sin(inc_angle)))
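# The calibration factor normalizes by the azimuth-integrated two-way
# antenna pattern and the ground-projected range resolution cell, so
# that pixel power is commensurate with NRCS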
if do_hh:
noise = (utils.db2lin(nesz, amplitude=True) / np.sqrt(2.) *
(np.random.normal(size=total_raw_hh.shape) +
1j*np.random.normal(size=total_raw_hh.shape)))
total_raw_hh = total_raw_hh * cal_factor + noise
if do_vv:
noise = (utils.db2lin(nesz, amplitude=True) / np.sqrt(2.) *
(np.random.normal(size=total_raw_vv.shape) +
1j*np.random.normal(size=total_raw_vv.shape)))
total_raw_vv = total_raw_vv * cal_factor + noise
# Add slow-time error
# if use_errors:
# if do_hh:
# total_raw_hh *= errors.beta_noise
# if do_vv:
# total_raw_vv *= errors.beta_noise
# Save RAW data
if do_hh and do_vv:
rshp = (1,) + total_raw_hh.shape
total_raw = np.concatenate((total_raw_hh.reshape(rshp),
total_raw_vv.reshape(rshp)))
rshp = (1,) + NRCS_avg_hh.shape
NRCS_avg = np.concatenate((NRCS_avg_hh.reshape(rshp),
NRCS_avg_vv.reshape(rshp)))
elif do_hh:
rshp = (1,) + total_raw_hh.shape
total_raw = total_raw_hh.reshape(rshp)
rshp = (1,) + NRCS_avg_hh.shape
NRCS_avg = NRCS_avg_hh.reshape(rshp)
else:
rshp = (1,) + total_raw_vv.shape
total_raw = total_raw_vv.reshape(rshp)
rshp = (1,) + NRCS_avg_vv.shape
NRCS_avg = NRCS_avg_vv.reshape(rshp)
raw_file = tpio.SkimRawFile(output_file, 'w', total_raw.shape)
raw_file.set('inc_angle', np.rad2deg(inc_angle))
raw_file.set('f0', f0)
# raw_file.set('num_ch', num_ch)
raw_file.set('ant_L', ant_L)
raw_file.set('prf', prf)
raw_file.set('v_ground', v_ground)
raw_file.set('orbit_alt', alt)
raw_file.set('sr0', sr0)
raw_file.set('rg_sampling', rg_bw*over_fs)
raw_file.set('rg_bw', rg_bw)
raw_file.set('raw_data*', total_raw)
raw_file.set('NRCS_avg', NRCS_avg)
raw_file.set('azimuth', cfg.radar.azimuth)
raw_file.set('dop_ref', dop_ref)
raw_file.close()
print(time.strftime("Finished [%Y-%m-%d %H:%M:%S]", time.localtime()))
if __name__ == '__main__':
# INPUT ARGUMENTS
# parser = argparse.ArgumentParser()
# parser.add_argument('-c', '--cfg_file')
# parser.add_argument('-o', '--output_file')
# parser.add_argument('-oc', '--ocean_file')
# parser.add_argument('-ro', '--reuse_ocean_file', action='store_true')
# parser.add_argument('-er', '--errors_file', type=str, default=None)
# parser.add_argument('-re', '--reuse_errors_file', action='store_true')
# args = parser.parse_args()
#
# skimraw(args.cfg_file, args.output_file,
# args.ocean_file, args.reuse_ocean_file,
# args.errors_file, args.reuse_errors_file)
##
skimraw(r"D:\research\TU Delft\Data\OceanSAR\SKIM_proxy_new.cfg",
r"D:\research\TU Delft\Data\OceanSAR\out1.nc",
r"D:\research\TU Delft\Data\OceanSAR\out2.nc",
False,
False,
False)
| gpl-3.0 |
elkingtonmcb/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
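# Illustration (hypothetical call): a mix of binary and multiclass
# inputs is coerced to a common 'multiclass' type, e.g.
# _check_targets([0, 1, 2], [0, 1, 1])
# -> ('multiclass', array([0, 1, 2]), array([0, 1, 1]))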
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
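# Illustration: with normalize=True this is the (weighted) mean of the
# per-sample scores; with weights only, their weighted sum; otherwise
# the plain sum, e.g. _weighted_sum(np.array([1, 0, 1]), None) == 2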
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
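# Illustration (hypothetical): perfect agreement yields kappa == 1.0,
# e.g. cohen_kappa_score(['a', 'b', 'a'], ['a', 'b', 'a']) == 1.0,
# while statistically independent annotations drive kappa towards 0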
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
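# Illustrative sketch (editorial addition, not part of the original sklearn
# module): zero_one_loss is the complement of accuracy_score, so in the
# normalized case loss == 1 - accuracy, and in the unnormalized case
# loss == n_samples - n_correct. The helper name is hypothetical.
def _zero_one_loss_reference(y_true, y_pred):
    """Reference fraction of misclassifications for 1d label arrays."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.mean(y_true != y_pred)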
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
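# Illustrative sketch (editorial addition, not part of the original sklearn
# module): f1_score is fbeta_score with beta=1, i.e. the plain harmonic mean
# 2*p*r/(p + r) of precision and recall. The helper name is hypothetical.
def _f1_from_precision_recall(p, r):
    """Reference F1 from scalar precision and recall; 0.0 when p == r == 0."""
    return 0. if p + r == 0 else 2. * p * r / (p + r)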
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
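# Illustrative sketch (editorial addition, not part of the original sklearn
# module): given scalar precision p and recall r, the F-beta score computed
# via precision_recall_fscore_support reduces to the weighted harmonic mean
# (1 + beta**2) * p * r / (beta**2 * p + r). The helper name is hypothetical.
def _fbeta_from_precision_recall(p, r, beta):
    """Reference F-beta from scalar precision and recall; 0.0 when undefined."""
    beta2 = beta ** 2
    denom = beta2 * p + r
    return 0. if denom == 0 else (1 + beta2) * p * r / denom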
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
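# Usage sketch for _prf_divide (editorial addition, not part of the original
# sklearn module): zero denominators yield 0.0 instead of inf/nan, and an
# UndefinedMetricWarning is emitted. The wrapper name is hypothetical.
def _prf_divide_example():
    """Return array([0.5, 0.]) while warning about the undefined entry."""
    with np.errstate(divide='ignore', invalid='ignore'):
        return _prf_divide(np.array([2., 0.]), np.array([4., 0.]),
                           'precision', 'predicted', None, ('precision',))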
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
    If ``pos_label is None`` and the targets are binary, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
       <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
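# Illustrative sketch (editorial addition, not part of the original sklearn
# module): with average='micro' the per-label counts are summed before the
# division, so for single-label multiclass input (total predictions equal
# total true labels) micro-precision, micro-recall and micro-F coincide.
# The helper name is hypothetical and mirrors the bookkeeping above.
def _micro_prf(tp_sum, pred_sum, true_sum):
    """Reference micro-averaged (precision, recall) from per-label counts."""
    tp = float(np.sum(tp_sum))
    pred = float(np.sum(pred_sum))
    true = float(np.sum(true_sum))
    precision = tp / pred if pred else 0.
    recall = tp / true if true else 0.
    return precision, recall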
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
    loss : float or int
        Return the average Hamming loss between the elements of ``y_true``
        and ``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
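# Illustrative sketch (editorial addition, not part of the original sklearn
# module): on dense multilabel indicators the Hamming loss averages per-label
# errors, whereas the subset zero-one loss charges a whole sample for any
# mismatch, hence hamming_loss <= zero_one_loss. The helper name is
# hypothetical and, unlike hamming_loss above, does not handle sparse input.
def _multilabel_hamming_reference(y_true, y_pred):
    """Reference Hamming loss for dense 0/1 indicator arrays."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.mean(y_true != y_pred)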
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
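# Illustrative sketch (editorial addition, not part of the original sklearn
# module): in the binary case the clipping and renormalization above reduce
# to the textbook expression below. The helper name is hypothetical.
def _binary_log_loss_reference(y_true, y_prob, eps=1e-15):
    """Reference mean negative log-likelihood for 0/1 labels."""
    y_true = np.asarray(y_true, dtype=float)
    p = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    return float(np.mean(-(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))))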
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in ``y_true`` are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
        True target, consisting of exactly two integer values. The positive
        label must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
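# Illustrative sketch (editorial addition, not part of the original sklearn
# module): for one multiclass sample the Crammer-Singer margin used above is
# decision[true_label] minus the best of the other labels' decision values,
# and the per-sample loss is max(0, 1 - margin). The helper name is
# hypothetical.
def _crammer_singer_sample_loss(decision, true_label):
    """Reference hinge loss for a single sample's decision values."""
    decision = np.asarray(decision, dtype=float)
    others = np.delete(decision, true_label)
    margin = decision[true_label] - np.max(others)
    return max(0., 1. - margin)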
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
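# Illustrative sketch (editorial addition, not part of the original sklearn
# module): once y_true has been reduced to 0/1 against pos_label, the Brier
# score is simply the mean squared error between the labels and the predicted
# probabilities. The helper name is hypothetical.
def _brier_reference(y_true01, y_prob):
    """Reference unweighted Brier score for 0/1 labels."""
    y_true01 = np.asarray(y_true01, dtype=float)
    y_prob = np.asarray(y_prob, dtype=float)
    return float(np.mean((y_true01 - y_prob) ** 2))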
| bsd-3-clause |
jreback/pandas | pandas/tests/io/parser/test_common.py | 1 | 67821 | """
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from inspect import signature
from io import BytesIO, StringIO
import os
import platform
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.compat import is_platform_linux
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat, option_context
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return CParserWrapper._set_noconvert_columns(self)
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
parser = MyTextFileReader()
parser.options = {
"usecols": [0, 2, 3],
"parse_dates": parse_dates,
"delimiter": ",",
}
parser._engine = MyCParserWrapper(StringIO(data), **parser.options)
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_empty_decimal_marker(all_parsers):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = "Only length-1 decimal markers supported"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), decimal="")
def test_bad_stream_exception(all_parsers, csv_dir_path):
# see gh-13652
#
# This test validates that both the Python engine and C engine will
# raise UnicodeDecodeError instead of C engine raising ParserError
# and swallowing the exception that caused read to fail.
path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup("utf-8")
parser = all_parsers
msg = "'utf-8' codec can't decode byte"
# Stream must be binary UTF8.
with open(path, "rb") as handle, codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
) as stream:
with pytest.raises(UnicodeDecodeError, match=msg):
parser.read_csv(stream)
def test_read_csv_local(all_parsers, csv1):
prefix = "file:///" if compat.is_platform_windows() else "file://"
parser = all_parsers
fname = prefix + str(os.path.abspath(csv1))
result = parser.read_csv(fname, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_1000_sep(all_parsers):
parser = all_parsers
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})
result = parser.read_csv(StringIO(data), sep="|", thousands=",")
tm.assert_frame_equal(result, expected)
def test_squeeze(all_parsers):
data = """\
a,1
b,2
c,3
"""
parser = all_parsers
index = Index(["a", "b", "c"], name=0)
expected = Series([1, 2, 3], name=1, index=index)
result = parser.read_csv(StringIO(data), index_col=0, header=None, squeeze=True)
tm.assert_series_equal(result, expected)
# see gh-8217
#
# Series should not be a view.
assert not result._is_view
def test_malformed(all_parsers):
# see gh-6607
parser = all_parsers
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = "Expected 3 fields in line 4, saw 5"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), header=1, comment="#")
@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
parser = all_parsers
msg = "Expected 3 fields in line 6, saw 5"
with parser.read_csv(
StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
) as reader:
with pytest.raises(ParserError, match=msg):
reader.read(nrows)
def test_unnamed_columns(all_parsers):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
parser = all_parsers
expected = DataFrame(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
dtype=np.int64,
columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_csv_mixed_type(all_parsers):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
parser = all_parsers
expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_read_csv_low_memory_no_rows_with_index(all_parsers):
# see gh-21141
parser = all_parsers
if not parser.low_memory:
pytest.skip("This is a low-memory specific test")
data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""
result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
expected = DataFrame(columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_read_csv_dataframe(all_parsers, csv1):
parser = all_parsers
result = parser.read_csv(csv1, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738],
[1.047916, -0.041232, -0.16181208307, 0.212549],
[0.498581, 0.731168, -0.537677223318, 1.346270],
[1.120202, 1.567621, 0.00364077397681, 0.675253],
[-0.487094, 0.571455, -1.6116394093, 0.103469],
[0.836649, 0.246462, 0.588542635376, 1.062782],
[-0.157161, 1.340307, 1.1957779562, -1.097007],
],
columns=["A", "B", "C", "D"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
datetime(2000, 1, 10),
datetime(2000, 1, 11),
],
name="index",
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
parser = all_parsers
csv2 = os.path.join(csv_dir_path, "test2.csv")
result = parser.read_csv(csv2, index_col=0, parse_dates=True)
expected = DataFrame(
[
[0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
[1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
[0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
[1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
[-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
],
columns=["A", "B", "C", "D", "E"],
index=Index(
[
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
datetime(2000, 1, 6),
datetime(2000, 1, 7),
]
),
)
tm.assert_frame_equal(result, expected)
def test_read_csv_wrong_num_columns(all_parsers):
# Too few columns.
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
parser = all_parsers
msg = "Expected 6 fields in line 3, saw 7"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
def test_read_duplicate_index_explicit(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(all_parsers):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"A,B\nTrue,1\nFalse,2\nTrue,3",
{},
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
{"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
DataFrame(
[[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
columns=["A", "B"],
),
),
(
"A,B\nTRUE,1\nFALSE,2\nTRUE,3",
{},
DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
),
(
"A,B\nfoo,bar\nbar,foo",
{"true_values": ["foo"], "false_values": ["bar"]},
DataFrame([[True, False], [False, True]], columns=["A", "B"]),
),
],
)
def test_parse_bool(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_int_conversion(all_parsers):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
# see gh-10476
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
columns=["index", "A", "B", "C", "D"],
)
parser = all_parsers
result = parser.read_csv(StringIO(data), nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
msg = r"'nrows' must be an integer >=0"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), nrows=nrows)
@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
expected = DataFrame(
[
["foo", 2, 3, 4, 5],
["bar", 7, 8, 9, 10],
["baz", 12, 13, 14, 15],
["qux", 12, 13, 14, 15],
["foo2", 12, 13, 14, 15],
["bar2", 12, 13, 14, 15],
],
columns=["index", "A", "B", "C", "D"],
)
expected = expected.set_index("index")
with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
chunks = list(reader)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
msg = r"'chunksize' must be an integer >=1"
with pytest.raises(ValueError, match=msg):
with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
pass
@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
# see gh-15755
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0, "nrows": 5}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
tm.assert_frame_equal(concat(reader), expected)
def test_read_chunksize_and_nrows_changing_size(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0, "nrows": 5}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])
with pytest.raises(StopIteration, match=""):
reader.get_chunk(size=3)
def test_get_chunk_passed_chunksize(all_parsers):
parser = all_parsers
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
with parser.read_csv(StringIO(data), chunksize=2) as reader:
result = reader.get_chunk()
expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
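# Minimal usage sketch (editorial addition, not one of the original pandas
# tests): successive get_chunk() calls resume where the previous chunk
# stopped, each returning ``chunksize`` rows by default. The test name is
# hypothetical.
def test_get_chunk_resumes_after_previous_chunk(all_parsers):
    parser = all_parsers
    data = "A,B\n1,2\n3,4\n5,6\n7,8"
    with parser.read_csv(StringIO(data), chunksize=2) as reader:
        first = reader.get_chunk()
        second = reader.get_chunk()
    assert list(first["A"]) == [1, 3]
    assert list(second["A"]) == [5, 7]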
@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
def test_read_chunksize_compat(all_parsers, kwargs):
# see gh-12185
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
tm.assert_frame_equal(concat(reader), result)
def test_read_chunksize_jagged_names(all_parsers):
# see gh-23509
parser = all_parsers
data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])
expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])
with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
result = concat(reader)
tm.assert_frame_equal(result, expected)
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_iterator(all_parsers):
# see gh-6607
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
expected = parser.read_csv(StringIO(data), **kwargs)
with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
first_chunk = reader.read(3)
tm.assert_frame_equal(first_chunk, expected[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, expected[3:])
def test_iterator2(all_parsers):
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
with parser.read_csv(StringIO(data), iterator=True) as reader:
result = list(reader)
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result[0], expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[:2])
tm.assert_frame_equal(chunks[1], expected[2:4])
tm.assert_frame_equal(chunks[2], expected[4:])
def test_reader_list_skiprows(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(chunks[0], expected[1:3])
def test_iterator_stop_on_chunksize(all_parsers):
# gh-3967: stopping iteration when chunksize is specified
parser = all_parsers
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
with parser.read_csv(StringIO(data), chunksize=1) as reader:
result = list(reader)
assert len(result) == 3
expected = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=["foo", "bar", "baz"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(concat(result), expected)
@pytest.mark.parametrize(
"kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
msg = "'skipfooter' not supported for iteration"
parser = all_parsers
data = "a\n1\n2"
with pytest.raises(ValueError, match=msg):
with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
pass
def test_nrows_skipfooter_errors(all_parsers):
msg = "'skipfooter' not supported with 'nrows'"
data = "a\n1\n2\n3\n4\n5\n6"
parser = all_parsers
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), skipfooter=1, nrows=5)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
"""foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
{"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
columns=["A", "B", "C", "D"],
),
),
(
"""foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
{"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
],
names=["index1", "index2"],
),
columns=["A", "B", "C", "D"],
),
),
],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
headless_data = "\n".join(data.split("\n")[1:])
names = ["A", "B", "C", "D"]
parser = all_parsers
result = parser.read_csv(
StringIO(headless_data), index_col=index_col, header=None, names=names
)
expected = parser.read_csv(StringIO(data), index_col=index_col)
# No index names in headless data.
expected.index.names = [None] * 2
tm.assert_frame_equal(result, expected)
def test_multi_index_no_level_names_implicit(all_parsers):
parser = all_parsers
data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
[2, 3, 4, 5],
[7, 8, 9, 10],
[12, 13, 14, 15],
[12, 13, 14, 15],
[12, 13, 14, 15],
],
columns=["A", "B", "C", "D"],
index=MultiIndex.from_tuples(
[
("foo", "one"),
("foo", "two"),
("foo", "three"),
("bar", "one"),
("bar", "two"),
]
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected,header",
[
("a,b", DataFrame(columns=["a", "b"]), [0]),
(
"a,b\nc,d",
DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
[0, 1],
),
],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
# see gh-14545
parser = all_parsers
data = expected.to_csv(index=False) if round_trip else data
result = parser.read_csv(StringIO(data), header=header)
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(all_parsers):
parser = all_parsers
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
result = parser.read_csv(StringIO(data), sep=" ")
expected = DataFrame(
[[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
)
tm.assert_frame_equal(result, expected)
def test_read_csv_parse_simple_list(all_parsers):
parser = all_parsers
data = """foo
bar baz
qux foo
foo
bar"""
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
tm.assert_frame_equal(result, expected)
@tm.network
def test_url(all_parsers, csv_dir_path):
# TODO: FTP testing
parser = all_parsers
kwargs = {"sep": "\t"}
url = (
"https://raw.github.com/pandas-dev/pandas/master/"
"pandas/tests/io/parser/data/salaries.csv"
)
url_result = parser.read_csv(url, **kwargs)
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
tm.assert_frame_equal(url_result, local_result)
@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
parser = all_parsers
kwargs = {"sep": "\t"}
local_path = os.path.join(csv_dir_path, "salaries.csv")
local_result = parser.read_csv(local_path, **kwargs)
url = "file://localhost/" + local_path
try:
url_result = parser.read_csv(url, **kwargs)
tm.assert_frame_equal(url_result, local_result)
except URLError:
# Fails on some systems.
pytest.skip("Failing on: " + " ".join(platform.uname()))
def test_path_path_lib(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_local_path(all_parsers):
parser = all_parsers
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv, lambda p: parser.read_csv(p, index_col=0)
)
tm.assert_frame_equal(df, result)
def test_nonexistent_path(all_parsers):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
path = f"{tm.rands(10)}.csv"
msg = r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
@td.skip_if_windows # os.chmod does not work on Windows
def test_no_permission(all_parsers):
# GH 23784
parser = all_parsers
msg = r"\[Errno 13\]"
with tm.ensure_clean() as path:
os.chmod(path, 0) # make file unreadable
# verify that this process cannot open the file (not running as sudo)
try:
with open(path):
pass
pytest.skip("Running as sudo.")
except PermissionError:
pass
with pytest.raises(PermissionError, match=msg) as e:
parser.read_csv(path)
assert path == e.value.filename
def test_missing_trailing_delimiters(all_parsers):
parser = all_parsers
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
columns=["A", "B", "C", "D"],
)
tm.assert_frame_equal(result, expected)
def test_skip_initial_space(all_parsers):
data = (
'"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
"1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
"314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
"70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
"0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
"-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
)
parser = all_parsers
result = parser.read_csv(
StringIO(data),
names=list(range(33)),
header=None,
na_values=["-9999.0"],
skipinitialspace=True,
)
expected = DataFrame(
[
[
"09-Apr-2012",
"01:10:18.300",
2456026.548822908,
12849,
1.00361,
1.12551,
330.65659,
355626618.16711,
73.48821,
314.11625,
1917.09447,
179.71425,
80.0,
240.0,
-350,
70.06056,
344.9837,
1,
1,
-0.689265,
-0.692787,
0.212036,
14.7674,
41.605,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
0,
12,
128,
]
]
)
tm.assert_frame_equal(result, expected)
def test_trailing_delimiters(all_parsers):
# see gh-2442
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=False)
expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(all_parsers):
# https://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
parser = all_parsers
result = parser.read_csv(
StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
)
assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'
tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))
def test_int64_min_issues(all_parsers):
# see gh-2599
parser = all_parsers
data = "A,B\n0,0\n0,"
result = parser.read_csv(StringIO(data))
expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(all_parsers):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"Numbers": [
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194,
]
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH38630, sometimes gives ResourceWarning", strict=False)
def test_chunks_have_consistent_numerical_type(all_parsers):
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
# Coercions should work without warnings.
with tm.assert_produces_warning(None):
result = parser.read_csv(StringIO(data))
assert type(result.a[0]) is np.float64
assert result.a.dtype == float
def test_warn_if_chunks_have_mismatched_type(all_parsers):
warning_type = None
parser = all_parsers
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["a", "b"] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if parser.engine == "c" and parser.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = parser.read_csv(StringIO(data))
assert df.a.dtype == object
@pytest.mark.parametrize("sep", [" ", r"\s+"])
def test_integer_overflow_bug(all_parsers, sep):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, sep=sep)
expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
tm.assert_frame_equal(result, expected)
def test_catch_too_many_names(all_parsers):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
parser = all_parsers
msg = (
"Too many columns specified: expected 4 and found 3"
if parser.engine == "c"
else "Number of passed names did not match "
"number of header fields in the file"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])
def test_ignore_leading_whitespace(all_parsers):
# see gh-3374, gh-6607
parser = all_parsers
data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"
result = parser.read_csv(StringIO(data), sep=r"\s+")
expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(all_parsers):
# see gh-10022
parser = all_parsers
data = "\n hello\nworld\n"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([" hello", "world"])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(all_parsers):
# see gh-10184
data = "x,y"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=0)
expected = DataFrame(columns=["y"], index=Index([], name="x"))
tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index(all_parsers):
# see gh-10467
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=["x", "y"])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
)
tm.assert_frame_equal(result, expected)
def test_empty_with_reversed_multi_index(all_parsers):
data = "x,y,z"
parser = all_parsers
result = parser.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame(
columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
)
tm.assert_frame_equal(result, expected)
def test_float_parser(all_parsers):
# see gh-9565
parser = all_parsers
data = "45e-1,4.5,45.,inf,-inf"
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(",")]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(all_parsers_all_precisions):
# see gh-12215
df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
data = df.to_csv(index=False)
parser, precision = all_parsers_all_precisions
df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
tm.assert_frame_equal(df_roundtrip, df)
@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
def test_int64_overflow(all_parsers, conv):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
parser = all_parsers
if conv is None:
# 13007854817840016671868 > UINT64_MAX, so this
# will overflow and return object as the dtype.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
"00013007854817840016671868",
"00013007854817840016749251",
"00013007854817840016754630",
"00013007854817840016781876",
"00013007854817840017028824",
"00013007854817840017963235",
"00013007854817840018860166",
],
columns=["ID"],
)
tm.assert_frame_equal(result, expected)
else:
# 13007854817840016671868 > UINT64_MAX, so attempts
# to cast to either int64 or uint64 will result in
# an OverflowError being raised.
msg = (
"(Python int too large to convert to C long)|"
"(long too big to convert)|"
"(int too big to convert)"
)
with pytest.raises(OverflowError, match=msg):
parser.read_csv(StringIO(data), converters={"ID": conv})
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
)
def test_int64_uint64_range(all_parsers, val):
# These numbers fall right inside the int64-uint64
# range, so they should be parsed as numbers without overflow.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
)
def test_outside_int64_uint64_range(all_parsers, val):
# These numbers fall just outside the int64-uint64
# range, so they should be parsed as string.
parser = all_parsers
result = parser.read_csv(StringIO(str(val)), header=None)
expected = DataFrame([str(val)])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp_data", [[str(-1), str(2 ** 63)], [str(2 ** 63), str(-1)]])
def test_numeric_range_too_wide(all_parsers, exp_data):
# No numerical dtype can hold both negative and uint64
# values, so they should be cast as string.
parser = all_parsers
data = "\n".join(exp_data)
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("neg_exp", [-617, -100000, -99999999999999999])
def test_very_negative_exponent(all_parsers_all_precisions, neg_exp):
# GH#38753
parser, precision = all_parsers_all_precisions
data = f"data\n10E{neg_exp}"
result = parser.read_csv(StringIO(data), float_precision=precision)
expected = DataFrame({"data": [0.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])
def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
# GH#38753
parser, precision = all_parsers_all_precisions
data = f"data\n10E{exp}"
result = parser.read_csv(StringIO(data), float_precision=precision)
if precision == "round_trip":
if exp == 999999999999999999 and is_platform_linux():
mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
request.node.add_marker(mark)
value = np.inf if exp > 0 else 0.0
expected = DataFrame({"data": [value]})
else:
expected = DataFrame({"data": [f"10E{exp}"]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
# see gh-9535
parser = all_parsers
expected = DataFrame(columns=["foo", "bar"])
nrows = 10
data = StringIO("foo,bar\n")
if iterator:
with parser.read_csv(data, chunksize=nrows) as reader:
result = next(iter(reader))
else:
result = parser.read_csv(data, nrows=nrows)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected,msg",
[
# gh-10728: WHITESPACE_LINE
(
"a,b,c\n4,5,6\n ",
{},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# gh-10548: EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
{"comment": "#"},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL_NOP
(
"a,b,c\n4,5,6\n\r",
{},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_COMMENT
(
"a,b,c\n4,5,6#comment",
{"comment": "#"},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# SKIP_LINE
(
"a,b,c\n4,5,6\nskipme",
{"skiprows": [2]},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# EAT_LINE_COMMENT
(
"a,b,c\n4,5,6\n#comment",
{"comment": "#", "skip_blank_lines": False},
DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
None,
),
# IN_FIELD
(
"a,b,c\n4,5,6\n ",
{"skip_blank_lines": False},
DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
None,
),
# EAT_CRNL
(
"a,b,c\n4,5,6\n\r",
{"skip_blank_lines": False},
DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
None,
),
# ESCAPED_CHAR
(
"a,b,c\n4,5,6\n\\",
{"escapechar": "\\"},
None,
"(EOF following escape character)|(unexpected end of data)",
),
# ESCAPE_IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"\\',
{"escapechar": "\\"},
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
# IN_QUOTED_FIELD
(
'a,b,c\n4,5,6\n"',
{"escapechar": "\\"},
None,
"(EOF inside string starting at row 2)|(unexpected end of data)",
),
],
ids=[
"whitespace-line",
"eat-line-comment",
"eat-crnl-nop",
"eat-comment",
"skip-line",
"eat-line-comment",
"in-field",
"eat-crnl",
"escaped-char",
"escape-in-quoted-field",
"in-quoted-field",
],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg):
# see gh-10728, gh-10548
parser = all_parsers
if expected is None:
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
# see gh-12203
parser = all_parsers
data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""
if usecols is None:
# Make sure that an error is still raised
# when the "usecols" parameter is not provided.
msg = r"Expected \d+ fields in line \d+, saw \d+"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
else:
expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# First, check that the parser raises the correct error when it is
# given no columns to parse, with or without usecols.
("", {}, None),
("", {"usecols": ["X"]}, None),
(
",,",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"], index=[0], dtype=np.float64),
),
(
"",
{"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
DataFrame(columns=["X"]),
),
],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
# see gh-12493
parser = all_parsers
if expected is None:
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# gh-8661, gh-8679: this should ignore six lines, including
# lines with trailing whitespace and blank lines.
(
{
"header": None,
"delim_whitespace": True,
"skiprows": [0, 1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
),
# gh-8983: test skipping set of rows after a row with trailing spaces.
(
{
"delim_whitespace": True,
"skiprows": [1, 2, 3, 5, 6],
"skip_blank_lines": True,
},
DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
),
],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
parser = all_parsers
result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
tm.assert_frame_equal(result, expected)
def test_raise_on_sep_with_delim_whitespace(all_parsers):
# see gh-6607
data = "a b c\n1 2 3"
parser = all_parsers
with pytest.raises(ValueError, match="you can only specify one"):
parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)
@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
# see gh-9710
parser = all_parsers
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({"MyColumn": list("abab")})
result = parser.read_csv(
StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"sep,skip_blank_lines,exp_data",
[
(",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
(
",",
False,
[
[1.0, 2.0, 4.0],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5.0, np.nan, 10.0],
[np.nan, np.nan, np.nan],
[-70.0, 0.4, 1.0],
],
),
],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data):
parser = all_parsers
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
if sep == r"\s+":
data = data.replace(",", " ")
result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
expected = DataFrame(exp_data, columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_whitespace_lines(all_parsers):
parser = all_parsers
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,expected",
[
(
""" A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
DataFrame(
[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
columns=["A", "B", "C", "D"],
index=["a", "b", "c"],
),
),
(
" a b c\n1 2 3 \n4 5 6\n 7 8 9",
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
),
],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
# see gh-6607
parser = all_parsers
result = parser.read_csv(StringIO(data), sep=r"\s+")
tm.assert_frame_equal(result, expected)
def test_verbose_read(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
# Engines are verbose in different ways.
parser.read_csv(StringIO(data), verbose=True)
captured = capsys.readouterr()
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 3 NA values in column a\n"
def test_verbose_read2(all_parsers, capsys):
parser = all_parsers
data = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
parser.read_csv(StringIO(data), verbose=True, index_col=0)
captured = capsys.readouterr()
# Engines are verbose in different ways.
if parser.engine == "c":
assert "Tokenization took:" in captured.out
assert "Parser memory cleanup took:" in captured.out
else: # Python engine
assert captured.out == "Filled 1 NA values in column a\n"
def test_iteration_open_handle(all_parsers):
parser = all_parsers
kwargs = {"squeeze": True, "header": None}
with tm.ensure_clean() as path:
with open(path, "w") as f:
f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")
with open(path) as f:
for line in f:
if "CCC" in line:
break
result = parser.read_csv(f, **kwargs)
expected = Series(["DDD", "EEE", "FFF", "GGG"], name=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,thousands,decimal",
[
(
"""A|B|C
1|2,334.01|5
10|13|10.
""",
",",
".",
),
(
"""A|B|C
1|2.334,01|5
10|13|10,
""",
".",
",",
),
],
)
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
parser = all_parsers
expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
result = parser.read_csv(
StringIO(data), sep="|", thousands=thousands, decimal=decimal
)
tm.assert_frame_equal(result, expected)
def test_euro_decimal_format(all_parsers):
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
result = parser.read_csv(StringIO(data), sep=";", decimal=",")
expected = DataFrame(
[
[1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
[2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
[3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
],
columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
expected = DataFrame(
{"A": [float("inf"), float("-inf")] * 5},
index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_infinity_parsing(all_parsers, na_filter):
parser = all_parsers
data = """\
,A
a,Infinity
b,-Infinity
c,+Infinity
"""
expected = DataFrame(
{"A": [float("infinity"), float("-infinity"), float("+infinity")]},
index=["a", "b", "c"],
)
result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
parser = all_parsers
data = "\n" * nrows
msg = "No columns to parse from file"
with pytest.raises(EmptyDataError, match=msg):
parser.read_csv(StringIO(data))
@td.check_file_leaks
def test_memory_map(all_parsers, csv_dir_path):
mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
parser = all_parsers
expected = DataFrame(
{"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
)
result = parser.read_csv(mmap_file, memory_map=True)
tm.assert_frame_equal(result, expected)
def test_null_byte_char(all_parsers):
# see gh-2741
data = "\x00,foo"
names = ["a", "b"]
parser = all_parsers
if parser.engine == "c":
expected = DataFrame([[np.nan, "foo"]], columns=names)
out = parser.read_csv(StringIO(data), names=names)
tm.assert_frame_equal(out, expected)
else:
msg = "NULL byte detected"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), names=names)
def test_temporary_file(all_parsers):
# see gh-13398
parser = all_parsers
data = "0 0"
with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
new_file.write(data)
new_file.flush()
new_file.seek(0)
result = parser.read_csv(new_file, sep=r"\s+", header=None)
expected = DataFrame([[0, 0]])
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte(all_parsers):
# see gh-5500
parser = all_parsers
data = "a,b\n1\x1a,2"
expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_internal_eof_byte_to_file(all_parsers):
# see gh-16559
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
path = f"__{tm.rands(10)}__.csv"
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
f.write(data)
result = parser.read_csv(path)
tm.assert_frame_equal(result, expected)
def test_sub_character(all_parsers, csv_dir_path):
# see gh-16893
filename = os.path.join(csv_dir_path, "sub_char.csv")
expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])
parser = all_parsers
result = parser.read_csv(filename)
tm.assert_frame_equal(result, expected)
def test_file_handle_string_io(all_parsers):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
data = "a,b\n1,2"
fh = StringIO(data)
parser.read_csv(fh)
assert not fh.closed
def test_file_handles_with_open(all_parsers, csv1):
# gh-14418
#
# Don't close user provided file handles.
parser = all_parsers
for mode in ["r", "rb"]:
with open(csv1, mode) as f:
parser.read_csv(f)
assert not f.closed
def test_invalid_file_buffer_class(all_parsers):
# see gh-15337
class InvalidBuffer:
pass
parser = all_parsers
msg = "Invalid file path or buffer object type"
with pytest.raises(ValueError, match=msg):
parser.read_csv(InvalidBuffer())
def test_invalid_file_buffer_mock(all_parsers):
# see gh-15337
parser = all_parsers
msg = "Invalid file path or buffer object type"
class Foo:
pass
with pytest.raises(ValueError, match=msg):
parser.read_csv(Foo())
def test_valid_file_buffer_seems_invalid(all_parsers):
# gh-16135: we want to ensure that "tell" and "seek"
# aren't actually being used when we call `read_csv`
#
# Thus, while the object may look "invalid" (these
# methods are attributes of the `StringIO` class),
# it is still a valid file-object for our purposes.
class NoSeekTellBuffer(StringIO):
def tell(self):
raise AttributeError("No tell method")
def seek(self, pos, whence=0):
raise AttributeError("No seek method")
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(NoSeekTellBuffer(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs",
[{}, {"error_bad_lines": True}], # Default is True. # Explicitly pass in.
)
@pytest.mark.parametrize(
"warn_kwargs", [{}, {"warn_bad_lines": True}, {"warn_bad_lines": False}]
)
def test_error_bad_lines(all_parsers, kwargs, warn_kwargs):
# see gh-15925
parser = all_parsers
kwargs.update(**warn_kwargs)
data = "a\n1\n1,2,3\n4\n5,6,7"
msg = "Expected 1 fields in line 3, saw 3"
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
def test_warn_bad_lines(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(StringIO(data), error_bad_lines=False, warn_bad_lines=True)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert "Skipping line 3" in captured.err
assert "Skipping line 5" in captured.err
def test_suppress_error_output(all_parsers, capsys):
# see gh-15925
parser = all_parsers
data = "a\n1\n1,2,3\n4\n5,6,7"
expected = DataFrame({"a": [1, 4]})
result = parser.read_csv(
StringIO(data), error_bad_lines=False, warn_bad_lines=False
)
tm.assert_frame_equal(result, expected)
captured = capsys.readouterr()
assert captured.err == ""
@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
# see gh-15086.
parser = all_parsers
df = DataFrame({"a": [1, 2, 3]})
with tm.ensure_clean(filename) as path:
df.to_csv(path, index=False)
result = parser.read_csv(path)
tm.assert_frame_equal(result, df)
def test_read_csv_memory_growth_chunksize(all_parsers):
# see gh-24805
#
# Let's just make sure that we don't crash
# as we iteratively process all chunks.
parser = all_parsers
with tm.ensure_clean() as path:
with open(path, "w") as f:
for i in range(1000):
f.write(str(i) + "\n")
with parser.read_csv(path, chunksize=20) as result:
for _ in result:
pass
def test_read_csv_raises_on_header_prefix(all_parsers):
# gh-27394
parser = all_parsers
msg = "Argument prefix must be None if argument header is not None"
s = StringIO("0,1\n2,3")
with pytest.raises(ValueError, match=msg):
parser.read_csv(s, header=0, prefix="_X")
def test_unexpected_keyword_parameter_exception(all_parsers):
# GH-34976
parser = all_parsers
msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg.format("read_csv")):
parser.read_csv("foo.csv", foo=1)
with pytest.raises(TypeError, match=msg.format("read_table")):
parser.read_table("foo.tsv", foo=1)
def test_read_table_same_signature_as_read_csv(all_parsers):
# GH-34976
parser = all_parsers
table_sign = signature(parser.read_table)
csv_sign = signature(parser.read_csv)
assert table_sign.parameters.keys() == csv_sign.parameters.keys()
assert table_sign.return_annotation == csv_sign.return_annotation
for key, csv_param in csv_sign.parameters.items():
table_param = table_sign.parameters[key]
if key == "sep":
assert csv_param.default == ","
assert table_param.default == "\t"
assert table_param.annotation == csv_param.annotation
assert table_param.kind == csv_param.kind
continue
else:
assert table_param == csv_param
def test_read_table_equivalency_to_read_csv(all_parsers):
# see gh-21948
# As of 0.25.0, read_table is undeprecated
parser = all_parsers
data = "a\tb\n1\t2\n3\t4"
expected = parser.read_csv(StringIO(data), sep="\t")
result = parser.read_table(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_first_row_bom(all_parsers):
# see gh-26545
parser = all_parsers
data = '''\ufeff"Head1" "Head2" "Head3"'''
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
def test_first_row_bom_unquoted(all_parsers):
# see gh-36343
parser = all_parsers
data = """\ufeffHead1 Head2 Head3"""
result = parser.read_csv(StringIO(data), delimiter="\t")
expected = DataFrame(columns=["Head1", "Head2", "Head3"])
tm.assert_frame_equal(result, expected)
def test_integer_precision(all_parsers):
# Gh 7072
s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
parser = all_parsers
result = parser.read_csv(StringIO(s), header=None)[4]
expected = Series([4321583677327450765, 4321113141090630389], name=4)
tm.assert_series_equal(result, expected)
def test_file_descriptor_leak(all_parsers):
# GH 31488
parser = all_parsers
with tm.ensure_clean() as path:
def test():
with pytest.raises(EmptyDataError, match="No columns to parse from file"):
parser.read_csv(path)
td.check_file_leaks(test)()
@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
# GH 28071
ref = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
columns=list("ab"),
)
csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
parser = all_parsers
df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
tm.assert_frame_equal(df, ref[:nrows])
def test_no_header_two_extra_columns(all_parsers):
# GH 26218
column_names = ["one", "two", "three"]
ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
stream = StringIO("foo,bar,baz,bam,blah")
parser = all_parsers
df = parser.read_csv(stream, header=None, names=column_names, index_col=False)
tm.assert_frame_equal(df, ref)
def test_read_csv_names_not_accepting_sets(all_parsers):
# GH 34946
data = """\
1,2,3
4,5,6\n"""
parser = all_parsers
with pytest.raises(ValueError, match="Names should be an ordered collection."):
parser.read_csv(StringIO(data), names=set("QAZ"))
def test_read_csv_with_use_inf_as_na(all_parsers):
# https://github.com/pandas-dev/pandas/issues/35493
parser = all_parsers
data = "1.0\nNaN\n3.0"
with option_context("use_inf_as_na", True):
result = parser.read_csv(StringIO(data), header=None)
expected = DataFrame([1.0, np.nan, 3.0])
tm.assert_frame_equal(result, expected)
def test_read_table_delim_whitespace_default_sep(all_parsers):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
result = parser.read_table(f, delim_whitespace=True)
expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)
@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
# GH: 35958
f = StringIO("a b c\n1 -2 -3\n4 5 6")
parser = all_parsers
msg = (
"Specified a delimiter with both sep and "
"delim_whitespace=True; you can only specify one."
)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, sep=delimiter)
with pytest.raises(ValueError, match=msg):
parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
def test_dict_keys_as_names(all_parsers):
# GH: 36928
data = "1,2"
keys = {"a": int, "b": int}.keys()
parser = all_parsers
result = parser.read_csv(StringIO(data), names=keys)
expected = DataFrame({"a": [1], "b": [2]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_read_csv_file_handle(all_parsers, io_class, encoding):
"""
Test whether read_csv does not close user-provided file handles.
GH 36980
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
content = "a,b\n1,2"
if io_class == BytesIO:
content = content.encode("utf-8")
handle = io_class(content)
tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
assert not handle.closed
def test_memory_map_file_handle_silent_fallback(all_parsers, compression):
"""
Do not fail for buffers with memory_map=True (cannot memory map BytesIO).
GH 37621
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
handle = BytesIO()
expected.to_csv(handle, index=False, compression=compression, mode="wb")
handle.seek(0)
tm.assert_frame_equal(
parser.read_csv(handle, memory_map=True, compression=compression),
expected,
)
def test_memory_map_compression(all_parsers, compression):
"""
Support memory map for compressed files.
GH 37621
"""
parser = all_parsers
expected = DataFrame({"a": [1], "b": [2]})
with tm.ensure_clean() as path:
expected.to_csv(path, index=False, compression=compression)
tm.assert_frame_equal(
parser.read_csv(path, memory_map=True, compression=compression),
expected,
)
def test_context_manager(all_parsers, datapath):
# make sure that opened files are closed
parser = all_parsers
path = datapath("io", "data", "csv", "iris.csv")
reader = parser.read_csv(path, chunksize=1)
assert not reader._engine.handles.handle.closed
try:
with reader:
next(reader)
assert False
except AssertionError:
assert reader._engine.handles.handle.closed
def test_context_manager_user_provided(all_parsers, datapath):
# make sure that user-provided handles are not closed
parser = all_parsers
with open(datapath("io", "data", "csv", "iris.csv"), mode="r") as path:
reader = parser.read_csv(path, chunksize=1)
assert not reader._engine.handles.handle.closed
try:
with reader:
next(reader)
assert False
except AssertionError:
assert not reader._engine.handles.handle.closed
| bsd-3-clause |
araichev/gtfstk | gtfstk/validators.py | 1 | 46598 | """
Functions about validation.
"""
import re
import pytz
import datetime as dt
from typing import Optional, List, Union, TYPE_CHECKING
import pycountry
import numpy as np
import pandas as pd
from pandas import DataFrame
from . import constants as cs
from . import helpers as hp
if TYPE_CHECKING:
from .feed import Feed
TIME_PATTERN1 = re.compile(r"^\d\d:\d\d:\d\d$")
TIME_PATTERN2 = re.compile(r"^\d:\d\d:\d\d$")
DATE_FORMAT = "%Y%m%d"
TIMEZONES = set(pytz.all_timezones)
# ISO639-1 language codes, both lower and upper case
LANGS = set(
[lang.alpha_2 for lang in pycountry.languages if hasattr(lang, "alpha_2")]
)
LANGS |= set(x.upper() for x in LANGS)
CURRENCIES = set(
[c.alpha_3 for c in pycountry.currencies if hasattr(c, "alpha_3")]
)
URL_PATTERN = re.compile(
r"^(?:http)s?://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
EMAIL_PATTERN = re.compile(r"[^@]+@[^@]+\.[^@]+")
COLOR_PATTERN = re.compile(r"(?:[0-9a-fA-F]{2}){3}$")
def valid_str(x: str) -> bool:
"""
Return ``True`` if ``x`` is a non-blank string;
otherwise return ``False``.
"""
if isinstance(x, str) and x.strip():
return True
else:
return False
def valid_time(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid H:MM:SS or HH:MM:SS time;
otherwise return ``False``.
"""
if isinstance(x, str) and (
re.match(TIME_PATTERN1, x) or re.match(TIME_PATTERN2, x)
):
return True
else:
return False
def valid_date(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid YYYYMMDD date;
otherwise return ``False``.
"""
try:
if x != dt.datetime.strptime(x, DATE_FORMAT).strftime(DATE_FORMAT):
raise ValueError
return True
except ValueError:
return False
def valid_timezone(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid human-readable timezone string,
e.g. 'Africa/Abidjan'; otherwise return ``False``.
"""
return x in TIMEZONES
def valid_lang(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid two-letter ISO 639 language
code, e.g. 'aa'; otherwise return ``False``.
"""
return x in LANGS
def valid_currency(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid three-letter ISO 4217 currency
code, e.g. 'AED'; otherwise return ``False``.
"""
return x in CURRENCIES
def valid_url(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid URL; otherwise return ``False``.
"""
if isinstance(x, str) and re.match(URL_PATTERN, x):
return True
else:
return False
def valid_email(x: str) -> bool:
"""
Return ``True`` if ``x`` is a valid email address; otherwise return
``False``.
"""
if isinstance(x, str) and re.match(EMAIL_PATTERN, x):
return True
else:
return False
def valid_color(x: str) -> bool:
"""
Return ``True`` if ``x`` a valid hexadecimal color string without
the leading hash; otherwise return ``False``.
"""
if isinstance(x, str) and re.match(COLOR_PATTERN, x):
return True
else:
return False
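# A minimal illustrative sketch (not part of the original module) of the
# field validators above; nothing here runs on import.
def _demo_field_validators() -> None:
    assert valid_time("8:30:00") and valid_time("08:30:00")
    assert not valid_time("8:30")  # seconds are required
    assert valid_date("20210314") and not valid_date("2021-03-14")
    assert valid_timezone("Africa/Abidjan")
    assert valid_url("https://example.com") and not valid_url("example.com")
    assert valid_color("00FF7F") and not valid_color("#00FF7F")  # no leading hash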
def check_for_required_columns(
problems: List, table: str, df: DataFrame
) -> List:
"""
Check that the given GTFS table has the required columns.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check that the DataFrame contains the columns required by the GTFS
and append to the problems list one error for each missing
column.
"""
r = cs.GTFS_REF
req_columns = r.loc[
(r["table"] == table) & r["column_required"], "column"
].values
for col in req_columns:
if col not in df.columns:
problems.append(["error", f"Missing column {col}", table, []])
return problems
def check_for_invalid_columns(
problems: List, table: str, df: DataFrame
) -> List:
"""
Check for invalid columns in the given GTFS DataFrame.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
Returns
-------
list
The ``problems`` list extended as follows.
Check whether the DataFrame contains extra columns not in the
GTFS and append to the problems list one warning for each extra
column.
"""
r = cs.GTFS_REF
valid_columns = r.loc[r["table"] == table, "column"].values
for col in df.columns:
if col not in valid_columns:
problems.append(
["warning", f"Unrecognized column {col}", table, []]
)
return problems
def check_table(
problems: List,
table: str,
df: DataFrame,
condition,
message: str,
type_: str = "error",
) -> List:
"""
Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
One involving ``df``, e.g. ``~df['route_id'].map(valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
otherwise do not append anything.
"""
indices = df.loc[condition].index.tolist()
if indices:
problems.append([type_, message, table, indices])
return problems
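# A minimal usage sketch (illustrative only): check_table records the rows
# of a toy table where the problem condition holds.
def _demo_check_table() -> None:
    f = pd.DataFrame({"route_id": ["r1", "r2", "r1"]})
    problems = check_table(
        [], "routes", f, f["route_id"].duplicated(), "Repeated route_id"
    )
    assert problems == [["error", "Repeated route_id", "routes", [2]]]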
def check_column(
problems: List,
table: str,
df: DataFrame,
column: str,
checker,
message: Optional[str] = None,
type_: str = "error",
*,
column_required: bool = True,
) -> List:
"""
Check the given column of the given GTFS table with the given problem
checker.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
checker : boolean valued unary function
Returns ``True`` if and only if no problem is encountered
message : string (optional)
Problem message, e.g. 'Invalid route_id'.
Defaults to 'Invalid ``column``; maybe has extra space characters'
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
Apply the checker to the column entries and record the indices
of ``df`` where the checker returns ``False``.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
before applying the checker.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(checker)
if not message:
message = f"Invalid {column}; maybe has extra space characters"
problems = check_table(problems, table, f, cond, message, type_)
return problems
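# A minimal usage sketch (illustrative only): the checker runs per entry
# and failing rows are reported with the default message.
def _demo_check_column() -> None:
    f = pd.DataFrame({"agency_name": ["Metro", "  "]})
    problems = check_column([], "agency", f, "agency_name", valid_str)
    assert problems == [
        [
            "error",
            "Invalid agency_name; maybe has extra space characters",
            "agency",
            [1],
        ]
    ]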
def check_column_id(
problems: List,
table: str,
df: DataFrame,
column: str,
*,
column_required: bool = True,
) -> List:
"""
A specialization of :func:`check_column`.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
Returns
-------
list
The ``problems`` list extended as follows.
Record the indices of ``df`` where the given column has
duplicated entries or invalid strings.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
f = df.copy()
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
cond = ~f[column].map(valid_str)
problems = check_table(
problems,
table,
f,
cond,
f"Invalid {column}; maybe has extra space characters",
)
cond = f[column].duplicated()
problems = check_table(problems, table, f, cond, f"Repeated {column}")
return problems
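# A minimal usage sketch (illustrative only): check_column_id reports
# invalid strings and duplicates as separate problems.
def _demo_check_column_id() -> None:
    f = pd.DataFrame({"fare_id": ["f1", "f1", " "]})
    problems = check_column_id([], "fare_attributes", f, "fare_id")
    assert [p[3] for p in problems] == [[2], [1]]  # invalid row, then repeated row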
def check_column_linked_id(
problems: List,
table: str,
df: DataFrame,
column: str,
target_df: DataFrame,
target_column: Optional[str] = None,
*,
column_required: bool = True,
) -> List:
"""
A modified version of :func:`check_column_id`.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
column : string
A column of ``df``
column_required : boolean
``True`` if and only if ``column`` is required
(and not optional) by the GTFS
target_df : DataFrame
A GTFS table
target_column : string
A column of ``target_df``; defaults to ``column``
Returns
-------
list
The ``problems`` list extended as follows.
Record indices of ``df`` where the following condition is
violated: ``column`` contains IDs that are valid strings and are
present in ``target_df`` under the ``target_column`` name.
If the list of indices is nonempty, append to the problems the
item ``[type_, problem, table, indices]``; otherwise do not
append anything.
If not ``column_required``, then NaN entries will be ignored
in the checking.
"""
if target_column is None:
target_column = column
f = df.copy()
if target_df is None:
g = pd.DataFrame()
g[target_column] = np.nan
else:
g = target_df.copy()
if target_column not in g.columns:
g[target_column] = np.nan
if not column_required:
if column not in f.columns:
f[column] = np.nan
f = f.dropna(subset=[column])
g = g.dropna(subset=[target_column])
cond = ~f[column].isin(g[target_column])
problems = check_table(problems, table, f, cond, f"Undefined {column}")
return problems
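# A minimal usage sketch (illustrative only): every ID in the column must
# appear in the target table, here fare_rules.route_id -> routes.route_id.
def _demo_check_column_linked_id() -> None:
    routes = pd.DataFrame({"route_id": ["r1"]})
    rules = pd.DataFrame({"fare_id": ["f1", "f2"], "route_id": ["r1", "rX"]})
    problems = check_column_linked_id([], "fare_rules", rules, "route_id", routes)
    assert problems == [["error", "Undefined route_id", "fare_rules", [1]]]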
def format_problems(
problems: List, *, as_df: bool = False
) -> Union[List, DataFrame]:
"""
Format the given problems list, optionally as a DataFrame.
Parameters
----------
problems : list
A list of four-tuples, each containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
as_df : boolean
If ``True``, return the problems as a DataFrame instead of a list
Returns
-------
list or DataFrame
Return ``problems`` if not ``as_df``; otherwise return a
DataFrame with the problems as rows and the columns
``['type', 'message', 'table', 'rows']``.
"""
if as_df:
problems = pd.DataFrame(
problems, columns=["type", "message", "table", "rows"]
).sort_values(["type", "table"])
return problems
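# A minimal usage sketch (illustrative only): with as_df=True the problems
# come back as a DataFrame sorted by type and table.
def _demo_format_problems() -> None:
    problems = [["error", "Missing column agency_url", "agency", []]]
    df = format_problems(problems, as_df=True)
    assert list(df.columns) == ["type", "message", "table", "rows"]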
def check_agency(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Check that ``feed.agency`` follows the GTFS.
Return a list of problems of the form described in
:func:`check_table`;
the list will be empty if no problems are found.
"""
table = "agency"
problems = []
# Preliminary checks
if feed.agency is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.agency.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check agency_id
problems = check_column_id(
problems, table, f, "agency_id", column_required=False
)
# Check agency_name
problems = check_column(problems, table, f, "agency_name", valid_str)
# Check agency_url
problems = check_column(problems, table, f, "agency_url", valid_url)
# Check agency_timezone
problems = check_column(
problems, table, f, "agency_timezone", valid_timezone
)
# Check agency_fare_url
problems = check_column(
problems, table, f, "agency_fare_url", valid_url, column_required=False
)
# Check agency_lang
problems = check_column(
problems, table, f, "agency_lang", valid_lang, column_required=False
)
# Check agency_phone
problems = check_column(
problems, table, f, "agency_phone", valid_str, column_required=False
)
# Check agency_email
problems = check_column(
problems, table, f, "agency_email", valid_email, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_calendar(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar``.
"""
table = "calendar"
problems = []
# Preliminary checks
if feed.calendar is None:
return problems
f = feed.calendar.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column_id(problems, table, f, "service_id")
# Check weekday columns
v = lambda x: x in range(2)
for col in [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]:
problems = check_column(problems, table, f, col, v)
# Check start_date and end_date
for col in ["start_date", "end_date"]:
problems = check_column(problems, table, f, col, valid_date)
if include_warnings:
# Check if feed has expired
d = f["end_date"].max()
if feed.calendar_dates is not None and not feed.calendar_dates.empty:
table += "/calendar_dates"
d = max(d, feed.calendar_dates["date"].max())
if d < dt.datetime.today().strftime(DATE_FORMAT):
problems.append(["warning", "Feed expired", table, []])
return format_problems(problems, as_df=as_df)
def check_calendar_dates(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar_dates``.
"""
table = "calendar_dates"
problems = []
# Preliminary checks
if feed.calendar_dates is None:
return problems
f = feed.calendar_dates.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column(problems, table, f, "service_id", valid_str)
# Check date
problems = check_column(problems, table, f, "date", valid_date)
# No duplicate (service_id, date) pairs allowed
cond = f[["service_id", "date"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (service_id, date)"
)
# Check exception_type
v = lambda x: x in [1, 2]
problems = check_column(problems, table, f, "exception_type", v)
return format_problems(problems, as_df=as_df)
def check_fare_attributes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.fare_attributes``.
"""
table = "fare_attributes"
problems = []
# Preliminary checks
if feed.fare_attributes is None:
return problems
f = feed.fare_attributes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_id(problems, table, f, "fare_id")
# Check currency_type
problems = check_column(
problems, table, f, "currency_type", valid_currency
)
# Check payment_method
v = lambda x: x in range(2)
problems = check_column(problems, table, f, "payment_method", v)
# Check transfers
v = lambda x: pd.isna(x) or x in range(3)
problems = check_column(problems, table, f, "transfers", v)
# Check transfer_duration
v = lambda x: x >= 0
problems = check_column(
problems, table, f, "transfer_duration", v, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_fare_rules(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.fare_rules``.
"""
table = "fare_rules"
problems = []
# Preliminary checks
if feed.fare_rules is None:
return problems
f = feed.fare_rules.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check fare_id
problems = check_column_linked_id(
problems, table, f, "fare_id", feed.fare_attributes
)
# Check route_id
problems = check_column_linked_id(
problems, table, f, "route_id", feed.routes, column_required=False
)
# Check origin_id, destination_id, contains_id
for col in ["origin_id", "destination_id", "contains_id"]:
problems = check_column_linked_id(
problems,
table,
f,
col,
feed.stops,
"zone_id",
column_required=False,
)
return format_problems(problems, as_df=as_df)
def check_feed_info(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.feed_info``.
"""
table = "feed_info"
problems = []
# Preliminary checks
if feed.feed_info is None:
return problems
f = feed.feed_info.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check feed_publisher_name
problems = check_column(
problems, table, f, "feed_publisher_name", valid_str
)
# Check feed_publisher_url
problems = check_column(
problems, table, f, "feed_publisher_url", valid_url
)
# Check feed_lang
problems = check_column(problems, table, f, "feed_lang", valid_lang)
# Check feed_start_date and feed_end_date
cols = ["feed_start_date", "feed_end_date"]
for col in cols:
problems = check_column(
problems, table, f, col, valid_date, column_required=False
)
if set(cols) <= set(f.columns):
d1, d2 = f.loc[0, ["feed_start_date", "feed_end_date"]].values
if pd.notna(d1) and pd.notna(d2) and d1 > d2:
problems.append(
[
"error",
"feed_start_date later than feed_end_date",
table,
[0],
]
)
# Check feed_version
problems = check_column(
problems, table, f, "feed_version", valid_str, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_frequencies(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.frequencies``.
"""
table = "frequencies"
problems = []
# Preliminary checks
if feed.frequencies is None:
return problems
f = feed.frequencies.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check start_time and end_time
time_cols = ["start_time", "end_time"]
for col in time_cols:
problems = check_column(problems, table, f, col, valid_time)
for col in time_cols:
f[col] = f[col].map(hp.timestr_to_seconds)
# start_time should be earlier than end_time
cond = f["start_time"] >= f["end_time"]
problems = check_table(
problems, table, f, cond, "start_time not earlier than end_time"
)
# Headway periods should not overlap
f = f.sort_values(["trip_id", "start_time"])
for __, group in f.groupby("trip_id"):
a = group["start_time"].values
b = group["end_time"].values
indices = np.flatnonzero(a[1:] < b[:-1]).tolist()
if indices:
problems.append(
[
"error",
"Headway periods for the same trip overlap",
table,
indices,
]
)
# Check headway_secs
v = lambda x: x >= 0
problems = check_column(problems, table, f, "headway_secs", v)
# Check exact_times
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "exact_times", v, column_required=False
)
return format_problems(problems, as_df=as_df)
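# Hypothetical sketch (not in the original module) of the vectorized overlap
# test used above: after sorting one trip's headway periods by start_time,
# period i+1 overlaps period i exactly when a[1:] < b[:-1].
def _example_overlap_indices():
    a = np.array([0, 3600, 7000])     # start times in seconds
    b = np.array([3600, 7200, 9000])  # end times in seconds
    # 7000 < 7200, so the third period overlaps the second; flatnonzero
    # returns [1]
    return np.flatnonzero(a[1:] < b[:-1]).tolist()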
def check_routes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.routes``.
"""
table = "routes"
problems = []
# Preliminary checks
if feed.routes is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.routes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check route_id
problems = check_column_id(problems, table, f, "route_id")
# Check agency_id
if "agency_id" in f:
if feed.agency is None:
problems.append(
[
"error",
"agency_id column present in routes agency table missing",
table,
[],
]
)
elif "agency_id" not in feed.agency.columns:
problems.append(
[
"error",
"agency_id column present in routes but not in agency",
table,
[],
]
)
else:
g = f.dropna(subset=["agency_id"])
cond = ~g["agency_id"].isin(feed.agency["agency_id"])
problems = check_table(
problems, table, g, cond, "Undefined agency_id"
)
# Check route_short_name and route_long_name
for column in ["route_short_name", "route_long_name"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
cond = ~(f["route_short_name"].notna() | f["route_long_name"].notna())
problems = check_table(
problems,
table,
f,
cond,
"route_short_name and route_long_name both empty",
)
# Check route_type
v = lambda x: x in range(8)
problems = check_column(problems, table, f, "route_type", v)
# Check route_url
problems = check_column(
problems, table, f, "route_url", valid_url, column_required=False
)
# Check route_color and route_text_color
for col in ["route_color", "route_text_color"]:
problems = check_column(
problems, table, f, col, valid_color, column_required=False
)
if include_warnings:
# Check for duplicated (route_short_name, route_long_name) pairs
cond = f[["route_short_name", "route_long_name"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (route_short_name, route_long_name)",
"warning",
)
# Check for routes without trips
s = feed.trips["route_id"]
cond = ~f["route_id"].isin(s)
problems = check_table(
problems, table, f, cond, "Route has no trips", "warning"
)
return format_problems(problems, as_df=as_df)
def check_shapes(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.shapes``.
"""
table = "shapes"
problems = []
# Preliminary checks
if feed.shapes is None:
return problems
f = feed.shapes.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
f.sort_values(["shape_id", "shape_pt_sequence"], inplace=True)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check shape_id
problems = check_column(problems, table, f, "shape_id", valid_str)
# Check shape_pt_lon and shape_pt_lat
for column, bound in [("shape_pt_lon", 180), ("shape_pt_lat", 90)]:
v = lambda x: pd.notna(x) and -bound <= x <= bound
cond = ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
# Check for duplicated (shape_id, shape_pt_sequence) pairs
cond = f[["shape_id", "shape_pt_sequence"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (shape_id, shape_pt_sequence)"
)
# Check if shape_dist_traveled decreases on a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_sid = None
prev_index = None
prev_dist = -1
cols = ["shape_id", "shape_dist_traveled"]
for i, sid, dist in g[cols].itertuples():
if sid == prev_sid and dist < prev_dist:
indices.append(prev_index)
prev_sid = sid
prev_index = i
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
return format_problems(problems, as_df=as_df)
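# Hypothetical sketch (not part of the original module) of the running scan
# used above to detect decreases in shape_dist_traveled within one shape.
# The module records the row just before the drop (prev_index); this sketch
# flags the dropping row itself, for simplicity.
def _example_decrease_indices():
    g = pd.DataFrame({
        "shape_id": ["s1", "s1", "s1"],
        "shape_dist_traveled": [0.0, 5.0, 3.0],
    })
    indices = []
    prev_sid, prev_dist = None, -1
    for i, sid, dist in g[["shape_id", "shape_dist_traveled"]].itertuples():
        if sid == prev_sid and dist < prev_dist:
            indices.append(i)  # row 2: 3.0 < 5.0
        prev_sid, prev_dist = sid, dist
    return indices  # [2]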
def check_stops(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stops``.
"""
table = "stops"
problems = []
# Preliminary checks
if feed.stops is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stops.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check stop_id
problems = check_column_id(problems, table, f, "stop_id")
# Check stop_code, stop_desc, zone_id, parent_station
for column in ["stop_code", "stop_desc", "zone_id", "parent_station"]:
problems = check_column(
problems, table, f, column, valid_str, column_required=False
)
# Check stop_name
problems = check_column(problems, table, f, "stop_name", valid_str)
# Check stop_lon and stop_lat
if "location_type" in f.columns:
requires_location = f.location_type.isin([0, 1, 2])
else:
requires_location = True
for column, bound in [("stop_lon", 180), ("stop_lat", 90)]:
v = lambda x: pd.notna(x) and -bound <= x <= bound
cond = requires_location & ~f[column].map(v)
problems = check_table(
problems,
table,
f,
cond,
f"{column} out of bounds {[-bound, bound]}",
)
# Check stop_url
problems = check_column(
problems, table, f, "stop_url", valid_url, column_required=False
)
# Check location_type
v = lambda x: x in range(5)
problems = check_column(
problems, table, f, "location_type", v, column_required=False
)
# Check stop_timezone
problems = check_column(
problems,
table,
f,
"stop_timezone",
valid_timezone,
column_required=False,
)
# Check wheelchair_boarding
v = lambda x: x in range(3)
problems = check_column(
problems, table, f, "wheelchair_boarding", v, column_required=False
)
# Check further location_type and parent_station
if "parent_station" in f.columns:
if "location_type" not in f.columns:
problems.append(
[
"error",
"parent_station column present but location_type column missing",
table,
[],
]
)
else:
# Parent stations must be well-defined
S = set(f.stop_id) | {np.nan}
v = lambda x: x in S
problems = check_column(
problems,
table,
f,
"parent_station",
v,
"A parent station must be well-defined",
column_required=False,
)
# Stations must have location type 1
station_ids = f.loc[f.parent_station.notna(), "parent_station"]
cond = f.stop_id.isin(station_ids) & (f.location_type != 1)
problems = check_table(
problems, table, f, cond, "A station must have location_type 1"
)
# Stations must not lie in stations
cond = (f.location_type == 1) & f.parent_station.notna()
problems = check_table(
problems,
table,
f,
cond,
"A station must not lie in another station",
)
# Entrances (type 2), generic nodes (type 3) and boarding areas (type 4)
# need to be part of a parent
cond = f.location_type.isin([2, 3, 4]) & f.parent_station.isna()
problems = check_table(
problems,
table,
f,
cond,
"Entrances, nodes, and boarding areas must be part of a parent station",
)
if include_warnings:
# Check for stops of location type 0 or NaN without stop times
ids = []
if feed.stop_times is not None:
ids = feed.stop_times.stop_id.unique()
cond = ~feed.stops.stop_id.isin(ids)
if "location_type" in feed.stops.columns:
cond &= f.location_type.isin([0, np.nan])
problems = check_table(
problems, table, f, cond, "Stop has no stop times", "warning"
)
return format_problems(problems, as_df=as_df)
def check_stop_times(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.stop_times``.
"""
table = "stop_times"
problems = []
# Preliminary checks
if feed.stop_times is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.stop_times.copy().sort_values(["trip_id", "stop_sequence"])
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_linked_id(
problems, table, f, "trip_id", feed.trips
)
# Check arrival_time and departure_time
v = lambda x: pd.isna(x) or valid_time(x)
for col in ["arrival_time", "departure_time"]:
problems = check_column(problems, table, f, col, v)
# Check that arrival and departure times exist for the first and last
# stop of each trip and for each timepoint.
# For feeds with many trips, iterating through the stop time rows is
# faster than using groupby.
if "timepoint" not in f.columns:
f["timepoint"] = np.nan # This will not mess up later timepoint check
indices = []
prev_tid = None
prev_index = None
prev_atime = 1
prev_dtime = 1
for i, tid, atime, dtime, tp in f[
["trip_id", "arrival_time", "departure_time", "timepoint"]
].itertuples():
if tid != prev_tid:
# Check last stop of previous trip
if pd.isna(prev_atime) or pd.isna(prev_dtime):
indices.append(prev_index)
# Check first stop of current trip
if pd.isna(atime) or pd.isna(dtime):
indices.append(i)
elif tp == 1 and (pd.isna(atime) or pd.isna(dtime)):
# Failure at timepoint
indices.append(i)
prev_tid = tid
prev_index = i
prev_atime = atime
prev_dtime = dtime
if pd.isna(prev_atime) or pd.isna(prev_dtime):
indices.append(prev_index)
if indices:
problems.append(
[
"error",
"First/last/time point arrival/departure time missing",
table,
indices,
]
)
# Check stop_id
problems = check_column_linked_id(
problems, table, f, "stop_id", feed.stops
)
# Check for duplicated (trip_id, stop_sequence) pairs
cond = f[["trip_id", "stop_sequence"]].dropna().duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (trip_id, stop_sequence)"
)
# Check stop_headsign
problems = check_column(
problems, table, f, "stop_headsign", valid_str, column_required=False
)
# Check pickup_type and drop_off_type
for col in ["pickup_type", "drop_off_type"]:
v = lambda x: x in range(4)
problems = check_column(
problems, table, f, col, v, column_required=False
)
# Check if shape_dist_traveled decreases on a trip
if "shape_dist_traveled" in f.columns:
g = f.dropna(subset=["shape_dist_traveled"])
indices = []
prev_tid = None
prev_dist = -1
for i, tid, dist in g[["trip_id", "shape_dist_traveled"]].itertuples():
if tid == prev_tid and dist < prev_dist:
indices.append(i)
prev_tid = tid
prev_dist = dist
if indices:
problems.append(
[
"error",
"shape_dist_traveled decreases on a trip",
table,
indices,
]
)
# Check timepoint
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "timepoint", v, column_required=False
)
if include_warnings:
# Check for duplicated (trip_id, departure_time) pairs
cond = f[["trip_id", "departure_time"]].duplicated()
problems = check_table(
problems,
table,
f,
cond,
"Repeated pair (trip_id, departure_time)",
"warning",
)
return format_problems(problems, as_df=as_df)
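# Hypothetical sketch (not part of the original module) of the single-pass
# trip-boundary scan used above: because rows are sorted by
# (trip_id, stop_sequence), the first stop of each trip is found wherever
# trip_id changes, and the previous row is then the last stop of the prior
# trip.
def _example_trip_boundaries():
    f = pd.DataFrame({"trip_id": ["t1", "t1", "t2", "t2"]})
    first_stops = []
    prev_tid = None
    for i, tid in f["trip_id"].items():
        if tid != prev_tid:
            first_stops.append(i)
        prev_tid = tid
    return first_stops  # [0, 2]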
def check_transfers(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.transfers``.
"""
table = "transfers"
problems = []
# Preliminary checks
if feed.transfers is None:
return problems
f = feed.transfers.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check from_stop_id and to_stop_id
for col in ["from_stop_id", "to_stop_id"]:
problems = check_column_linked_id(
problems, table, f, col, feed.stops, "stop_id"
)
# Check transfer_type
v = lambda x: pd.isna(x) or x in range(5)
problems = check_column(
problems, table, f, "transfer_type", v, column_required=False
)
# Check min_transfer_time
v = lambda x: x >= 0
problems = check_column(
problems, table, f, "min_transfer_time", v, column_required=False
)
return format_problems(problems, as_df=as_df)
def check_trips(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.trips``.
"""
table = "trips"
problems = []
# Preliminary checks
if feed.trips is None:
problems.append(["error", "Missing table", table, []])
else:
f = feed.trips.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check trip_id
problems = check_column_id(problems, table, f, "trip_id")
# Check route_id
problems = check_column_linked_id(
problems, table, f, "route_id", feed.routes
)
# Check service_id
g = pd.DataFrame()
if feed.calendar is not None:
g = pd.concat([g, feed.calendar], sort=False)
if feed.calendar_dates is not None:
g = pd.concat([g, feed.calendar_dates], sort=False)
problems = check_column_linked_id(problems, table, f, "service_id", g)
# Check direction_id
v = lambda x: x in range(2)
problems = check_column(
problems, table, f, "direction_id", v, column_required=False
)
# Check block_id
if "block_id" in f.columns:
v = lambda x: pd.isna(x) or valid_str(x)
cond = ~f["block_id"].map(v)
problems = check_table(problems, table, f, cond, "Blank block_id")
# Check shape_id
problems = check_column_linked_id(
problems, table, f, "shape_id", feed.shapes, column_required=False
)
# Check wheelchair_accessible and bikes_allowed
v = lambda x: x in range(3)
for column in ["wheelchair_accessible", "bikes_allowed"]:
problems = check_column(
problems, table, f, column, v, column_required=False
)
# Check for trips with no stop times
if include_warnings:
s = feed.stop_times["trip_id"] if feed.stop_times is not None else []
cond = ~f["trip_id"].isin(s)
problems = check_table(
problems, table, f, cond, "Trip has no stop times", "warning"
)
return format_problems(problems, as_df=as_df)
def validate(
feed: "Feed", *, as_df: bool = True, include_warnings: bool = True
) -> Union[List, DataFrame]:
"""
Check whether the given feed satisfies the GTFS.
Parameters
----------
feed : Feed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the GTFS is
violated; 'warning' means there is a problem but it's not a
GTFS violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the feed is missing required tables or required
columns.
Notes
-----
- This function interprets the GTFS liberally, classifying problems
as warnings rather than errors where the GTFS is unclear.
For example if a trip_id listed in the trips table is not listed
in the stop times table (a trip with no stop times),
then that's a warning and not an error.
- Timing benchmark: on a 2.80 GHz processor machine with 16 GB of
memory, this function checks `this 31 MB Southeast Queensland feed
<http://transitfeeds.com/p/translink/21/20170310>`_
in 22 seconds, including warnings.
"""
problems = []
# Run every table checker and collect their problems
checkers = [
"check_agency",
"check_calendar",
"check_calendar_dates",
"check_fare_attributes",
"check_fare_rules",
"check_feed_info",
"check_frequencies",
"check_routes",
"check_shapes",
"check_stops",
"check_stop_times",
"check_transfers",
"check_trips",
]
for checker in checkers:
problems.extend(
globals()[checker](feed, include_warnings=include_warnings)
)
# Check calendar/calendar_dates combo
if feed.calendar is None and feed.calendar_dates is None:
problems.append(
["error", "Missing both tables", "calendar & calendar_dates", []]
)
return format_problems(problems, as_df=as_df)
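# Hypothetical usage sketch (not part of the original module): run the full
# validation on a Feed object and inspect only the errors. Assumes a `feed`
# built elsewhere, e.g. with the package's own feed-reading function.
def _example_validate(feed):
    report = validate(feed, as_df=True, include_warnings=True)
    errors = report[report["type"] == "error"]
    return errors[["message", "table", "rows"]]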
| mit |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py | 69 | 50262 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import gettempdir
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
import numpy as npy
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
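# Hypothetical trace (not part of the original backend) of _get_papertype:
# the keys are sorted lexicographically and reversed, so the b-series is
# scanned before the a-series, and the first size strictly larger than the
# figure wins.
def _example_papertype():
    # For an 8 x 10 inch figure, b5 (7.16 x 10.11) is too narrow, so
    # b4 (10.11 x 14.33) is the first size that fits.
    return _get_papertype(8, 10)  # -> 'b4'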
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
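# Hypothetical sketch (not part of the original backend) of what
# quote_ps_string produces: backslashes and parentheses are escaped so the
# result is safe inside a PostScript string constant.
def _example_quote():
    s = quote_ps_string("50% (done) " + chr(92))  # trailing backslash
    return s  # the PostScript text reads: 50% \(done\) \\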
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or numerix arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return npy.alltrue(npy.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although PostScript itself is dpi independent, we need to
inform the image code about the requested dpi to generate
high-res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
if rcParams['path.simplify']:
self.simplify = (width * imagedpi, height * imagedpi)
else:
self.simplify = None
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self.hatch = None
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def set_hatch(self, hatch):
"""
hatch can be one of:
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
X - crossed diagonal
letters can be combined, in which case all the specified
hatchings are done
if same letter repeats, it increases the density of hatching
in that direction
"""
hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0}
for letter in hatch:
if (letter == '/'): hatches['diag2'] += 1
elif (letter == '\\'): hatches['diag1'] += 1
elif (letter == '|'): hatches['vert'] += 1
elif (letter == '-'): hatches['horiz'] += 1
elif (letter == '+'):
hatches['horiz'] += 1
hatches['vert'] += 1
elif (letter.lower() == 'x'):
hatches['diag1'] += 1
hatches['diag2'] += 1
def do_hatch(angle, density):
if (density == 0): return ""
return """\
gsave
eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth
/hatchgap %d def
pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def
hatchl cvi hatchgap idiv hatchgap mul
hatchgap
hatchr cvi hatchgap idiv hatchgap mul
{hatcht m 0 hatchb hatcht sub r }
for
stroke
grestore
""" % (angle, 12/density)
self._pswriter.write("gsave\n")
self._pswriter.write(do_hatch(90, hatches['horiz']))
self._pswriter.write(do_hatch(0, hatches['vert']))
self._pswriter.write(do_hatch(45, hatches['diag1']))
self._pswriter.write(do_hatch(-45, hatches['diag2']))
self._pswriter.write("grestore\n")
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
l,b,r,t = texmanager.get_ps_bbox(s, fontsize)
w = (r-l)
h = (t-b)
# TODO: We need a way to get a good baseline from
# text.usetex
return w, h, 0
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm')
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(findfont(prop, fontext='afm')))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
im.flipud_out()
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
hexlines = '\n'.join(self._hex_lines(bits))
xscale, yscale = (
w/self.image_magnification, h/self.image_magnification)
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, simplify=None):
path = transform.transform_path(path)
ps = []
last_points = None
for points, code in path.iter_segments(simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
last_points = points
ps = "\n".join(ps)
return ps
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
ps = self._convert_path(path, transform, self.simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
ps_cmd.append(self._convert_path(marker_path, marker_trans))
if rgbFace:
ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore'])
ps_cmd.extend(['stroke', 'grestore', '} bind def'])
tpath = trans.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
corr = 0#w/2*(fontsize-10)/10
pos = _nums_to_str(x-corr, y)
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif isinstance(s, unicode):
return self.draw_unicode(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
font = self._get_font_afm(prop)
l,b,w,h = font.get_str_bbox(s)
fontsize = prop.get_size_in_points()
l *= 0.001*fontsize
b *= 0.001*fontsize
w *= 0.001*fontsize
h *= 0.001*fontsize
if angle==90: l,b = -b, l # todo generalize for arb rotations
pos = _nums_to_str(x-l, y-b)
thetext = '(%s)' % s
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
rotate = '%1.1f rotate' % angle
setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3]
#h = 0
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(pos)s moveto
%(rotate)s
%(thetext)s
%(setcolor)s
show
grestore
""" % locals()
self._draw_ps(ps, gc, None)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
write("%s m\n"%_nums_to_str(x,y))
if angle:
write("gsave\n")
write("%s rotate\n"%_num_to_str(angle))
descent = font.get_descent() / 64.0
if descent:
write("0 %s rmoveto\n"%_num_to_str(descent))
write("(%s) show\n"%quote_ps_string(s))
if angle:
write("grestore\n")
def new_gc(self):
return GraphicsContextPS()
def draw_unicode(self, gc, x, y, s, prop, angle):
"""draw a unicode string. ps doesn't have unicode support, so
we have to do this the hard way
"""
if rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript sniplet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\ngrestore\n")
else:
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
hatch = gc.get_hatch()
if hatch:
self.set_hatch(hatch)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.get("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.get("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.get("dpi", 72)
facecolor = kwargs.get("facecolor", "w")
edgecolor = kwargs.get("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
elif is_writable_file_like(outfile):
title = None
tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fh = file(tmpfile, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
if not rcParams['ps.useafm']:
for font_filename, chars in renderer.used_characters.values():
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fonttype = rcParams['ps.fonttype']
convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = file(tmpfile)
print >>outfile, fh.read()
else:
shutil.move(tmpfile, outfile)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
fh = file(tmpfile, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
convert_psfrags(tmpfile, renderer.psfrag, font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else: shutil.move(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
# the split drive part of the command is necessary for windows users with
# multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
paper = '-sPAPERSIZE=%s'% ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \
(ptype, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
\nHere is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal; users may intend the ink not to be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l, b, r, t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox):
"""
Convert the postscript to encapsulated postscript.
"""
bbox_info = get_bbox(tmpfile, bbox)
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
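# Illustrative transformation: '%!PS-Adobe-3.0' becomes
# '%!PS-Adobe-3.0 EPSF-3.0' followed by the computed bounding-box comments,
# a small prolog is injected after '%%EndComments', and any existing
# %%BoundingBox, %%HiResBoundingBox and %%Pages comments are dropped.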
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
print >>epsh, '%%Page: 1 1'
break
elif line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%Pages'):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
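# For example, with mpldict active, "10 20 m 30 40 l" moves to (10, 20) and
# draws a line to (30, 40), while "5 5 0 0 box" traces a closed 5x5
# rectangle with its lower-left corner at the origin.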
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
| agpl-3.0 |
lehinevych/Dato-Core | src/unity/python/graphlab/data_structures/sframe.py | 13 | 196438 | """
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import infer_type_of_list
from graphlab.cython.context import debug_trace as cython_context
from graphlab.cython.cy_sframe import UnitySFrameProxy
from graphlab.util import _check_canvas_enabled, _make_internal_url, _is_callable
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import graphlab.aggregate
import graphlab
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import inspect
from graphlab.deps import pandas, HAS_PANDAS
import time
import itertools
import os
import subprocess
import uuid
import platform
__all__ = ['SFrame']
SFRAME_GARBAGE_COLLECTOR = []
FOOTER_STRS = ['Note: Only the head of the SFrame is printed.',
'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.']
LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.',
'You can use len(sf) to force materialization.']
SFRAME_ROOTS = [# Binary/lib location in production egg
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..')),
# Build tree location of SFrame binaries
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'sframe')),
# Location of python sources
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'unity', 'python', 'graphlab')),
# Build tree dependency location
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', '..', '..', 'deps', 'local', 'lib'))
]
RDD_SFRAME_PICKLE = "rddtosf_pickle"
RDD_SFRAME_NONPICKLE = "rddtosf_nonpickle"
SFRAME_RDD_PICKLE = "sftordd_pickle"
HDFS_LIB = "libhdfs.so"
RDD_JAR_FILE = "graphlab-create-spark-integration.jar"
SYS_UTIL_PY = "sys_util.py"
RDD_SUPPORT_INITED = False
BINARY_PATHS = {}
STAGING_DIR = None
RDD_SUPPORT = True
PRODUCTION_RUN = False
YARN_OS = None
SPARK_SUPPORT_NAMES = {'RDD_SFRAME_PATH':'rddtosf_pickle',
'RDD_SFRAME_NONPICKLE_PATH':'rddtosf_nonpickle',
'SFRAME_RDD_PATH':'sftordd_pickle',
'HDFS_LIB_PATH':'libhdfs.so',
'RDD_JAR_PATH':'graphlab-create-spark-integration.jar',
'SYS_UTIL_PY_PATH':'sys_util.py',
'SPARK_PIPE_WRAPPER_PATH':'spark_pipe_wrapper'}
first = True
for i in SFRAME_ROOTS:
for key,val in SPARK_SUPPORT_NAMES.iteritems():
tmp_path = os.path.join(i, val)
if key not in BINARY_PATHS and os.path.isfile(tmp_path):
BINARY_PATHS[key] = tmp_path
if all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
if first:
PRODUCTION_RUN = True
break
first = False
if not all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
RDD_SUPPORT = False
def get_spark_integration_jar_path():
"""
The absolute path of the jar file required to enable GraphLab Create's
integration with Apache Spark.
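Examples
--------
A minimal usage sketch; the returned path depends on where GraphLab Create
is installed:
>>> jar_path = graphlab.get_spark_integration_jar_path()
>>> jar_path.endswith('graphlab-create-spark-integration.jar')
True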
"""
if 'RDD_JAR_PATH' not in BINARY_PATHS:
raise RuntimeError("Could not find a spark integration jar. "\
"Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?")
return BINARY_PATHS['RDD_JAR_PATH']
def __rdd_support_init__(sprk_ctx):
global YARN_OS
global RDD_SUPPORT_INITED
global STAGING_DIR
global BINARY_PATHS
if not RDD_SUPPORT or RDD_SUPPORT_INITED:
return
# Make sure our GraphLabUtil scala functions are accessible from the driver
try:
tmp = sprk_ctx._jvm.org.graphlab.create.GraphLabUtil.EscapeString(sprk_ctx._jvm.java.lang.String("1,2,3,4"))
except:
raise RuntimeError("Could not execute RDD translation functions. "\
"Please make sure you have started Spark "\
"(either with spark-submit or pyspark) with the following flag set:\n"\
"'--driver-class-path " + BINARY_PATHS['RDD_JAR_PATH']+"'\n"\
"OR set the property spark.driver.extraClassPath in spark-defaults.conf")
dummy_rdd = sprk_ctx.parallelize([1])
if PRODUCTION_RUN and sprk_ctx.master == 'yarn-client':
# Get cluster operating system
os_rdd = dummy_rdd.map(lambda x: platform.system())
YARN_OS = os_rdd.collect()[0]
# Set binary path
for i in BINARY_PATHS.keys():
s = BINARY_PATHS[i]
if os.path.basename(s) == SPARK_SUPPORT_NAMES['SYS_UTIL_PY_PATH']:
continue
if YARN_OS == 'Linux':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'linux', os.path.basename(s))
elif YARN_OS == 'Darwin':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'osx', os.path.basename(s))
else:
raise RuntimeError("YARN cluster has unsupported operating system "\
"(something other than Linux or Mac OS X). "\
"Cannot convert RDDs on this cluster to SFrame.")
# Create staging directory
staging_dir = '.graphlabStaging'
if sprk_ctx.master == 'yarn-client':
tmp_loc = None
# Get that staging directory's full name
tmp_loc = dummy_rdd.map(
lambda x: subprocess.check_output(
["hdfs", "getconf", "-confKey", "fs.defaultFS"]).rstrip()).collect()[0]
STAGING_DIR = os.path.join(tmp_loc, "user", sprk_ctx.sparkUser(), staging_dir)
if STAGING_DIR is None:
raise RuntimeError("Failed to create a staging directory on HDFS. "\
"Do your cluster nodes have a working hdfs client?")
# Actually create the staging dir
unity = glconnect.get_unity()
unity.__mkdir__(STAGING_DIR)
unity.__chmod__(STAGING_DIR, 0777)
elif sprk_ctx.master[0:5] == 'local':
# Save the output sframes to the same temp workspace this engine is
# using
#TODO: Consider cases where server and client aren't on the same machine
unity = glconnect.get_unity()
STAGING_DIR = unity.get_current_cache_file_location()
if STAGING_DIR is None:
raise RuntimeError("Could not retrieve local staging directory! \
Please contact us on http://forum.dato.com.")
else:
raise RuntimeError("Your spark context's master is '" +
str(sprk_ctx.master) +
"'. Only 'local' and 'yarn-client' are supported.")
if sprk_ctx.master == 'yarn-client':
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_PATH'])
sprk_ctx.addFile(BINARY_PATHS['HDFS_LIB_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SFRAME_RDD_PATH'])
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SYS_UTIL_PY_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'])
sprk_ctx._jsc.addJar(BINARY_PATHS['RDD_JAR_PATH'])
RDD_SUPPORT_INITED = True
def load_sframe(filename):
"""
Load an SFrame. The filename extension is used to determine the format
automatically. This function is particularly useful for SFrames previously
saved in binary format. For CSV imports the ``SFrame.read_csv`` function
provides greater control. If the SFrame is in binary format, ``filename`` is
actually a directory, created when the SFrame is saved.
Parameters
----------
filename : string
Location of the file to load. Can be a local path or a remote URL.
Returns
-------
out : SFrame
See Also
--------
SFrame.save, SFrame.read_csv
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.save('my_sframe') # 'my_sframe' is a directory
>>> sf_loaded = graphlab.load_sframe('my_sframe')
"""
sf = SFrame(data=filename)
return sf
class SFrame(object):
"""
A tabular, column-mutable dataframe object that can scale to big data. The
data in SFrame is stored column-wise on the GraphLab Server side, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~graphlab.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
* general text file (with csv parsing options, See :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
* Apache Avro
* PySpark RDD
and from the following sources:
* your local file system
* the GraphLab Server's file system
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://dato.com/learn/user
guide/index.html#Working_with_data_Tabular_data>`_, `API Translator
<https://dato.com/learn/translator>`_, `How-Tos
<https://dato.com/learn/how-to>`_, and data science `Gallery
<https://dato.com/learn/gallery>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SFrame cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. A similar restriction applies to
:py:class:`graphlab.SGraph` and :py:class:`graphlab.SArray`.
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. GRAPHLAB_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. GRAPHLAB_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import graphlab
>>> from graphlab import SFrame
**Construction**
Construct an SFrame from a dataframe, transferring the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the python session is started. Alternatively, you can use
:py:func:`graphlab.aws.set_credentials()` to set the credentials after
python is started and :py:func:`graphlab.aws.get_credentials()` to verify
these environment variables.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['GRAPHLAB_JAVA_HOME'] = '/my/path/to/java'
>>> from graphlab import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = gl.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
The :py:func:`graphlab.SFrame.read_csv()` function is quite powerful and can
be used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> gl.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
A delimiter other than "," can be specified, for instance the space ' '
in this case. Only single-character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> gl.SFrame.read_csv('ratings.csv', delimiter=' ')
By default, "NA" or a missing element are interpreted as missing values.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> gl.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
| bill | None | None |
+---------+-------+--------+
[4 rows x 3 columns]
Furthermore, thanks to the dictionary and list types, the parser can handle
JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> gl.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
The list and dictionary parsers are quite flexible and can absorb a
variety of poorly formatted inputs. Also, note that the list and dictionary
types are recursive, allowing for arbitrary values to be contained.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> gl.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
{space:1 seperated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> gl.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
| {'seperated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = graphlab.load_sframe('mysframedir')
**Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance, given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[['val','id']]
>>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets, so looping over an SFrame
should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array:
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
The result is a new SFrame which contains only the rows of the SFrame whose
matching row in the binary_filter is non-zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
Rows: 2
Data:
id val
0 1 A
1 2 B
See :class:`~graphlab.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
Rows: 2
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from graphlab import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ['shape', '__proxy__', '_proxy']
def __init__(self, data=None,
format='auto',
_proxy=None):
"""__init__(data=list(), format='auto')
Construct a new SFrame from a url or a pandas.DataFrame.
"""
# emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
tracker = _mt._get_metric_tracker()
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySFrameProxy(glconnect.get_client())
_format = None
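# Illustrative outcomes of the auto-detection below: a pandas.DataFrame
# maps to 'dataframe', 'foo.csv'/'foo.csv.gz' to 'csv', 'foo.tsv' to 'tsv',
# a dict to 'dict', any other iterable to 'array', and any other string is
# treated as an sframe directory archive.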
if (format == 'auto'):
if (HAS_PANDAS and isinstance(data, pandas.DataFrame)):
_format = 'dataframe'
tracker.track('sframe.location.memory', value=1)
elif (isinstance(data, str) or isinstance(data, unicode)):
if data.find('://') == -1:
suffix = 'local'
else:
suffix = data.split('://')[0]
tracker.track(('sframe.location.%s' % (suffix)), value=1)
if data.endswith(('.csv', '.csv.gz')):
_format = 'csv'
elif data.endswith(('.tsv', '.tsv.gz')):
_format = 'tsv'
elif data.endswith(('.txt', '.txt.gz')):
print "Assuming file is csv. For other delimiters, " + \
"please use `SFrame.read_csv`."
_format = 'csv'
else:
_format = 'sframe'
elif type(data) == SArray:
_format = 'sarray'
elif isinstance(data, SFrame):
_format = 'sframe_obj'
elif (hasattr(data, 'iteritems')):
_format = 'dict'
tracker.track('sframe.location.memory', value=1)
elif hasattr(data, '__iter__'):
_format = 'array'
tracker.track('sframe.location.memory', value=1)
elif data is None:
_format = 'empty'
else:
raise ValueError('Cannot infer input type for data ' + str(data))
else:
_format = format
tracker.track(('sframe.format.%s' % _format), value=1)
with cython_context():
if (_format == 'dataframe'):
self.__proxy__.load_from_dataframe(data)
elif (_format == 'sframe_obj'):
for col in data.column_names():
self.__proxy__.add_column(data[col].__proxy__, col)
elif (_format == 'sarray'):
self.__proxy__.add_column(data.__proxy__, "")
elif (_format == 'array'):
if len(data) > 0:
unique_types = set([type(x) for x in data if x is not None])
if len(unique_types) == 1 and SArray in unique_types:
for arr in data:
self.add_column(arr)
elif SArray in unique_types:
raise ValueError("Cannot create SFrame from mix of regular values and SArrays")
else:
self.__proxy__.add_column(SArray(data).__proxy__, "")
elif (_format == 'dict'):
for key,val in iter(sorted(data.iteritems())):
if (type(val) == SArray):
self.__proxy__.add_column(val.__proxy__, key)
else:
self.__proxy__.add_column(SArray(val).__proxy__, key)
elif (_format == 'csv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter=',', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'tsv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter='\t', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'sframe'):
url = _make_internal_url(data)
self.__proxy__.load_from_sframe_index(url)
elif (_format == 'empty'):
pass
else:
raise ValueError('Unknown input type: ' + format)
sframe_size = -1
if self.__has_size__():
sframe_size = self.num_rows()
tracker.track('sframe.row.size', value=sframe_size)
tracker.track('sframe.col.size', value=self.num_cols())
@staticmethod
def _infer_column_types_from_lines(first_rows):
if (len(first_rows.column_names()) < 1):
print "Insufficient number of columns to perform type inference"
raise RuntimeError("Insufficient columns ")
if len(first_rows) < 1:
print "Insufficient number of rows to perform type inference"
raise RuntimeError("Insufficient rows")
# gets all the values column-wise
all_column_values_transposed = [list(first_rows[col])
for col in first_rows.column_names()]
# transpose
all_column_values = [list(x) for x in zip(*all_column_values_transposed)]
all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
# collect the hints
# if lines were inferred to have differing numbers of elements, give up
if len(set(len(x) for x in all_column_type_hints)) != 1:
print "Unable to infer column types. Defaulting to str"
return str
import types
column_type_hints = all_column_type_hints[0]
# now perform type combining across rows
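# Illustrative: rows hinted as [int, str] and [float, types.NoneType]
# combine to [float, str] -- int widens to float when a float is seen, and
# NoneType defers to whatever type the other row carries.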
for i in range(1, len(all_column_type_hints)):
currow = all_column_type_hints[i]
for j in range(len(column_type_hints)):
# combine types
d = set([currow[j], column_type_hints[j]])
if (len(d) == 1):
# easy case. both agree on the type
continue
if ((int in d) and (float in d)):
# one is an int, one is a float. it's a float
column_type_hints[j] = float
elif ((array.array in d) and (list in d)):
# one is an array, one is a list. it's a list
column_type_hints[j] = list
elif types.NoneType in d:
# one is a NoneType. assign to other type
if currow[j] != types.NoneType:
column_type_hints[j] = currow[j]
else:
column_type_hints[j] = str
# final pass. everything which is still NoneType is now a str
for i in range(len(column_type_hints)):
if column_type_hints[i] == types.NoneType:
column_type_hints[i] = str
return column_type_hints
@classmethod
def _read_csv_impl(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True,
store_errors=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and, optionally
(if store_errors=True), a dict mapping each filename to an SArray of the
incorrectly parsed lines encountered in that file.
Parameters
----------
store_errors : bool
If true, the output errors dict will be filled.
See `read_csv` for the rest of the parameters.
"""
parsing_config = dict()
parsing_config["delimiter"] = delimiter
parsing_config["use_header"] = header
parsing_config["continue_on_failure"] = not error_bad_lines
parsing_config["comment_char"] = comment_char
parsing_config["escape_char"] = escape_char
parsing_config["double_quote"] = double_quote
parsing_config["quote_char"] = quote_char
parsing_config["skip_initial_space"] = skip_initial_space
parsing_config["store_errors"] = store_errors
if type(na_values) is str:
na_values = [na_values]
if na_values is not None and len(na_values) > 0:
parsing_config["na_values"] = na_values
if nrows is not None:
parsing_config["row_limit"] = nrows
proxy = UnitySFrameProxy(glconnect.get_client())
internal_url = _make_internal_url(url)
if (not verbose):
glconnect.get_client().set_log_progress(False)
# Attempt to automatically detect the column types, producing a list of
# types on success; otherwise default to str for all columns.
column_type_inference_was_used = False
if column_type_hints is None:
try:
# Get the first 100 rows (using all the desired arguments).
first_rows = graphlab.SFrame.read_csv(url, nrows=100,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values = na_values)
column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']'
print "------------------------------------------------------"
print "Inferred types from first line of file as "
print "column_type_hints="+ typelist
print "If parsing fails due to incorrect types, you can correct"
print "the inferred type list above and pass it to read_csv in"
print "the column_type_hints argument"
print "------------------------------------------------------"
column_type_inference_was_used = True
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
# If the above fails, default back to str for all columns.
column_type_hints = str
print 'Could not detect types. Using str for each column.'
if type(column_type_hints) is type:
type_hints = {'__all_columns__': column_type_hints}
elif type(column_type_hints) is list:
type_hints = dict(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))
elif type(column_type_hints) is dict:
type_hints = column_type_hints
else:
raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.")
_mt._get_metric_tracker().track('sframe.csv.parse')
suffix=''
if url.find('://') == -1:
suffix = 'local'
else:
suffix = url.split('://')[0]
_mt._get_metric_tracker().track(('sframe.location.%s' % (suffix)), value=1)
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
if column_type_inference_was_used:
# try again
print "Unable to parse the file with automatic type inference."
print "Defaulting to column_type_hints=str"
type_hints = {'__all_columns__': str}
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except:
raise
else:
raise
glconnect.get_client().set_log_progress(True)
return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.iteritems() })
@classmethod
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict mapping each filename to
an SArray of the incorrectly parsed lines encountered in that file.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types apply
to each column in order, e.g. [int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
is a dict mapping each filename to an SArray of the incorrectly
parsed lines encountered in that file.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> (sf, bad_lines) = graphlab.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=True)
@classmethod
def read_csv(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names : 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
continue parsing skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types apply
to each column in order, e.g. [int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = graphlab.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = graphlab.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = graphlab.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Not treat first line as header:
>>> sf = graphlab.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
Treat '3' as missing value:
>>> sf = graphlab.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Throw error on parse failure:
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> sf = graphlab.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=False)[0]
def to_schema_rdd(self, sc, sql, number_of_partitions=4):
"""
Convert the current SFrame to the Spark SchemaRDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
sql : SQLContext
sql is an existing SQLContext.
number_of_partitions : int
number of partitions for the output rdd
Returns
-------
out: SchemaRDD
Examples
--------
>>> from pyspark import SparkContext, SQLContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sqlc = SQLContext(sc)
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_schema_rdd(sc, sqlc)
>>> rdd.collect()
[Row(x=1, y=u'fish'), Row(x=2, y=u'chips'), Row(x=3, y=u'salad')]
"""
def homogeneous_type(seq):
if seq is None or len(seq) == 0:
return True
iseq = iter(seq)
first_type = type(next(iseq))
return all(type(x) is first_type for x in iseq)
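# e.g. homogeneous_type([1, 2, 3]) -> True; homogeneous_type([1, 'a']) -> False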
if len(self) == 0:
raise ValueError("SFrame is empty")
column_names = self.column_names()
first_row = self.head(1)[0]
for name in column_names:
if hasattr(first_row[name],'__iter__') and homogeneous_type(first_row[name]) is not True:
raise TypeError("Support for translation to Spark SchemaRDD not enabled for heterogeneous iterable type (column: %s). Use SFrame.to_rdd()." % name)
for _type in self.column_types():
if(_type.__name__ == 'datetime'):
raise TypeError("Support for translation to Spark SchemaRDD not enabled for datetime type. Use SFrame.to_rdd() ")
rdd = self.to_rdd(sc, number_of_partitions)
from pyspark.sql import Row
rowRdd = rdd.map(lambda x: Row(**x))
return sql.inferSchema(rowRdd)
def to_rdd(self, sc, number_of_partitions=4):
"""
Convert the current SFrame to the Spark RDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
number_of_partitions: int
number of partitions for the output rdd
Returns
-------
out: RDD
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_rdd(sc)
>>> rdd.collect()
[{'x': 1L, 'y': 'fish'}, {'x': 2L, 'y': 'chips'}, {'x': 3L, 'y': 'salad'}]
"""
_mt._get_metric_tracker().track('sframe.to_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
for _type in self.column_types():
if(_type.__name__ == 'Image'):
raise TypeError("Support for translation to Spark RDDs not enabled for Image type.")
if type(number_of_partitions) is not int:
raise ValueError("number_of_partitions parameter expects an integer type")
if number_of_partitions == 0:
raise ValueError("number_of_partitions can not be initialized to zero")
# Save SFrame in a temporary place
tmp_loc = self.__get_staging_dir__(sc)
sf_loc = os.path.join(tmp_loc, str(uuid.uuid4()))
self.save(sf_loc)
# Keep track of the temporary sframe that was saved. We need to delete it eventually.
dummysf = load_sframe(sf_loc)
dummysf.__proxy__.delete_on_close()
SFRAME_GARBAGE_COLLECTOR.append(dummysf)
sframe_len = self.__len__()
small_partition_size = sframe_len/number_of_partitions
big_partition_size = small_partition_size + 1
num_big_partition_size = sframe_len % number_of_partitions
num_small_partition_size = number_of_partitions - num_big_partition_size
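# Worked example: 10 rows over 4 partitions gives small_partition_size=2,
# big_partition_size=3 and num_big_partition_size=2, so the ranges built
# below are ["0:3", "3:6", "6:8", "8:10"].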
count = 0
start_index = 0
ranges = []
while(count < number_of_partitions):
if(count < num_big_partition_size):
ranges.append((str(start_index)+":"+str(start_index + big_partition_size)))
start_index = start_index + big_partition_size
else:
ranges.append((str(start_index)+":"+str(start_index + small_partition_size)))
start_index = start_index + small_partition_size
count+=1
from pyspark import RDD
rdd = sc.parallelize(ranges,number_of_partitions)
if sc.master[0:5] == 'local':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + \
" " + BINARY_PATHS['SFRAME_RDD_PATH'] + " " + sf_loc)
elif sc.master == 'yarn-client':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] + \
" " + "./" + SPARK_SUPPORT_NAMES['SFRAME_RDD_PATH'] + \
" " + sf_loc)
serializedRdd = sc._jvm.org.graphlab.create.GraphLabUtil.stringToByte(pipeRdd)
import pyspark
output_rdd = RDD(serializedRdd,sc,pyspark.serializers.PickleSerializer())
return output_rdd
@classmethod
def __get_staging_dir__(cls,cur_sc):
if not RDD_SUPPORT_INITED:
__rdd_support_init__(cur_sc)
return STAGING_DIR
@classmethod
def from_rdd(cls, rdd):
"""
Convert a Spark RDD into a GraphLab Create SFrame.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
rdd : pyspark.rdd.RDD
Returns
-------
out : SFrame
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> rdd = sc.parallelize([1,2,3])
>>> sf = SFrame.from_rdd(rdd)
>>> sf
Data:
+-----+
| X1 |
+-----+
| 1.0 |
| 2.0 |
| 3.0 |
+-----+
[3 rows x 1 columns]
"""
_mt._get_metric_tracker().track('sframe.from_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
checkRes = rdd.take(1)
if len(checkRes) > 0 and checkRes[0].__class__.__name__ == 'Row' and rdd.__class__.__name__ not in {'SchemaRDD','DataFrame'}:
raise Exception("Conversion from RDD(pyspark.sql.Row) to SFrame not supported. Please call inferSchema(RDD) first.")
if(rdd._jrdd_deserializer.__class__.__name__ == 'UTF8Deserializer'):
return SFrame.__from_UTF8Deserialized_rdd__(rdd)
sf_names = None
rdd_type = "rdd"
if rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}:
rdd_type = "schemardd"
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
else:
sf_names = first_row.__FIELDS__
sf_names = [str(i) for i in sf_names]
cur_sc = rdd.ctx
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
mode = "batch"
if(rdd._jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
mode = "pickle"
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " + \
BINARY_PATHS['RDD_SFRAME_PATH'] + " " + tmp_loc +\
" " + mode + " " + rdd_type)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" + SPARK_SUPPORT_NAMES['RDD_SFRAME_PATH'] + " " +\
tmp_loc + " " + mode + " " + rdd_type)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf_coltypes = out_sf.column_types()
if(len(out_sf_coltypes) != 0):
sf_coltypes = sf.column_types()
sf_temp_names = sf.column_names()
out_sf_temp_names = out_sf.column_names()
for i in range(len(sf_coltypes)):
if sf_coltypes[i] != out_sf_coltypes[i]:
print "mismatch for types %s and %s" % (sf_coltypes[i],out_sf_coltypes[i])
sf[sf_temp_names[i]] = sf[sf_temp_names[i]].astype(str)
out_sf[out_sf_temp_names[i]] = out_sf[out_sf_temp_names[i]].astype(str)
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def __from_UTF8Deserialized_rdd__(cls, rdd):
_mt._get_metric_tracker().track('sframe.__from_UTF8Deserialized_rdd__')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
cur_sc = rdd.ctx
sf_names = None
sf_types = None
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
if(rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}):
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
sf_types = [type(i) for i in first_row.values()]
else:
sf_names = first_row.__FIELDS__
sf_types = [type(i) for i in first_row]
sf_names = [str(i) for i in sf_names]
for _type in sf_types:
if(_type != int and _type != str and _type != float and _type != unicode):
raise TypeError("Only int, str, and float are supported for now")
types = ""
for i in sf_types:
types += i.__name__ + ","
if cur_sc.master[0:5] == 'local':
t = rdd._jschema_rdd.toJavaStringOfValues().pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " + tmp_loc +\
" " + types)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.toJavaStringOfValues(
rdd._jschema_rdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc + " " + types)
else:
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def from_odbc(cls, db, sql, verbose=False):
"""
Convert a table or query from a database to an SFrame.
This function does not do any checking on the given SQL query, and
cannot know what effect it will have on the database. Any side effects
from the query will be reflected on the database. If no result
rows are returned, an empty SFrame is created.
Keep in mind the default letter case your database stores table names in. In
some cases, you may need to add quotation marks (or whatever character
your database uses to quote identifiers), especially if you created the
table using `to_odbc`.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
sql : str
A SQL query. The query must be acceptable by the ODBC driver used by
`graphlab.extensions._odbc_connection.unity_odbc_connection`.
Returns
-------
out : SFrame
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
does not apply to the machine your database is running on, which can
(and often will) be a separate machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> a_table = graphlab.SFrame.from_odbc(db, "SELECT * FROM a_table")
>>> join_result = graphlab.SFrame.from_odbc(db, 'SELECT * FROM "MyTable" a, "AnotherTable" b WHERE a.id=b.id')
"""
result = db.execute_query(sql)
if not isinstance(result, SFrame):
raise RuntimeError("Cannot create an SFrame for query. No result set.")
return result
def to_odbc(self, db, table_name, append_if_exists=False, verbose=True):
"""
Convert an SFrame to a table in a database.
By default, searches for a table in the database with the given name.
If found, this will attempt to append all the rows of the SFrame to the
end of the table. If not, this will create a new table with the given
name. This behavior is toggled with the `append_if_exists` flag.
When creating a new table, GraphLab Create uses a heuristic approach to
pick a corresponding type for each column in the SFrame using the type
information supplied by the database's ODBC driver. Your driver must
support giving this type information for GraphLab Create to support
writing to the database.
To allow more expressive and accurate naming, `to_odbc` puts quotes
around each identifier (table names and column names). Depending on
your database, you may need to refer to the created table with quote
characters around the name. This character is not the same for all
databases, but '"' is the most common.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
table_name : str
The name of the table you would like to create/append to.
append_if_exists : bool
If True, this will attempt to append to the table named `table_name`
if it is found to exist in the database.
verbose : bool
Print progress updates on the insertion process.
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
"local machine" rule does not apply to the machine your database is
running on, which can (and often will) be running on a separate
machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> sf = graphlab.SFrame({'a':[1,2,3],'b':['hi','pika','bye']})
>>> sf.to_odbc(db, 'a_cool_table')
"""
if (not verbose):
glconnect.get_client().set_log_progress(False)
db._insert_sframe(self, table_name, append_if_exists)
if (not verbose):
glconnect.get_client().set_log_progress(True)
def __repr__(self):
"""
Returns a string description of the frame
"""
printed_sf = self._imagecols_to_stringcols()
ret = self.__get_column_description__()
if self.__has_size__():
ret = ret + "Rows: " + str(len(self)) + "\n\n"
else:
ret = ret + "Rows: Unknown" + "\n\n"
ret = ret + "Data:\n"
if (len(printed_sf.head()) > 0):
ret = ret + str(self)
else:
ret = ret + "\t[]"
return ret
def __get_column_description__(self):
colnames = self.column_names()
coltypes = self.column_types()
ret = "Columns:\n"
if len(colnames) > 0:
for i in range(len(colnames)):
ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n"
ret = ret + "\n"
else:
ret = ret + "\tNone\n\n"
return ret
def __get_pretty_tables__(self, wrap_text=False, max_row_width=80,
max_column_width=30, max_columns=20,
max_rows_to_display=60):
"""
Returns a list of pretty print tables representing the current SFrame.
If the number of columns is larger than max_columns, the last pretty
table will contain an extra column of "...".
Parameters
----------
wrap_text : bool, optional
Whether to wrap cell text that exceeds the column width.
max_row_width : int, optional
Max number of characters wide each printed table may be.
max_column_width : int, optional
Max number of characters per column.
max_columns : int, optional
Max number of columns per table.
max_rows_to_display : int, optional
Max number of rows to display.
Returns
-------
out : list[PrettyTable]
"""
headsf = self.head(max_rows_to_display)
if headsf.shape == (0, 0):
return [PrettyTable()]
# convert array.array column to list column so they print like [...]
# and not array('d', ...)
for col in headsf.column_names():
if headsf[col].dtype() is array.array:
headsf[col] = headsf[col].astype(list)
def _value_to_str(value):
if (type(value) is array.array):
return str(list(value))
elif (type(value) is list):
return '[' + ", ".join(_value_to_str(x) for x in value) + ']'
else:
return str(value)
def _escape_space(s):
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def _truncate_respect_unicode(s, max_length):
if (len(s) <= max_length):
return s
else:
u = unicode(s, 'utf-8', errors='replace')
return u[:max_length].encode('utf-8')
def _truncate_str(s, wrap_str=False):
"""
Truncate and optionally wrap the input string as unicode, replacing
any unconvertible character with a replacement marker ('?').
"""
s = _escape_space(s)
if len(s) <= max_column_width:
return unicode(s, 'utf-8', errors='replace')
else:
ret = ''
# if wrap_str is true, wrap the text and take at most 2 rows
if wrap_str:
wrapped_lines = wrap(s, max_column_width)
if len(wrapped_lines) == 1:
return wrapped_lines[0]
last_line = wrapped_lines[1]
if len(last_line) >= max_column_width:
last_line = _truncate_respect_unicode(last_line, max_column_width - 4)
ret = wrapped_lines[0] + '\n' + last_line + ' ...'
else:
ret = _truncate_respect_unicode(s, max_column_width - 4) + '...'
return unicode(ret, 'utf-8', errors='replace')
columns = self.column_names()[:max_columns]
columns.reverse() # reverse the order of columns and we will pop from the end
num_column_of_last_table = 0
row_of_tables = []
# let's build a list of tables with max_columns
# each table should satisfy, max_row_width, and max_column_width
while len(columns) > 0:
tbl = PrettyTable()
table_width = 0
num_column_of_last_table = 0
while len(columns) > 0:
col = columns.pop()
# check the max length of element in the column
if len(headsf) > 0:
col_width = min(max_column_width, max(len(str(x)) for x in headsf[col]))
else:
col_width = max_column_width
if (table_width + col_width < max_row_width):
# truncate the header if necessary
header = _truncate_str(col, wrap_text)
tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]])
table_width = str(tbl).find('\n')
num_column_of_last_table += 1
else:
# the column does not fit in the current table, push it back to columns
columns.append(col)
break
tbl.align = 'c'
row_of_tables.append(tbl)
# add a column of all "..." if there are more columns than displayed
if self.num_cols() > max_columns:
row_of_tables[-1].add_column('...', ['...'] * len(headsf))
num_column_of_last_table += 1
# add a row of all "..." if there are more rows than displayed
if self.__has_size__() and self.num_rows() > headsf.num_rows():
row_of_tables[-1].add_row(['...'] * num_column_of_last_table)
return row_of_tables
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
See Also
--------
head, tail
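Examples
--------
A minimal sketch (``sf`` is a hypothetical two-column SFrame):
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})  # hypothetical data
>>> sf.print_rows(num_rows=3)
+----+-----+
| id | val |
+----+-----+
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[3 rows x 2 columns]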
"""
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _imagecols_to_stringcols(self, num_rows=10):
# A list of column types
types = self.column_types()
# A list of indexable column names
names = self.column_names()
# Constructing names of sframe columns that are of image type
image_column_names = [names[i] for i in range(len(names)) if types[i] == graphlab.Image]
#If there are image-type columns, copy the SFrame and cast the top MAX_NUM_ROWS_TO_DISPLAY of those columns to string
if len(image_column_names) > 0:
printed_sf = SFrame()
for t in names:
if t in image_column_names:
printed_sf[t] = self[t]._head_str(num_rows)
else:
printed_sf[t] = self[t].head(num_rows)
else:
printed_sf = self
return printed_sf
def __str__(self, num_rows=10, footer=True):
"""
Returns a string containing the first 10 elements of the frame, along
with a description of the frame.
"""
MAX_ROWS_TO_DISPLAY = num_rows
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if (not footer):
return '\n'.join([str(tb) for tb in row_of_tables])
if self.__has_size__():
footer = '[%d rows x %d columns]\n' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '\n'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]\n' % self.num_columns()
footer += '\n'.join(LAZY_FOOTER_STRS)
return '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _repr_html_(self):
MAX_ROWS_TO_DISPLAY = 10
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=True, max_row_width=120, max_columns=40, max_column_width=25, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if self.__has_size__():
footer = '[%d rows x %d columns]<br/>' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '<br/>'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]<br/>' % self.num_columns()
footer += '<br/>'.join(LAZY_FOOTER_STRS)
begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
end = '\n</div>'
return begin + '\n'.join([tb.get_html_string(format=True) for tb in row_of_tables]) + "\n" + footer + end
def __nonzero__(self):
"""
Returns true if the frame is not empty.
"""
return self.num_rows() != 0
def __len__(self):
"""
Returns the number of rows of the sframe.
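Example (a minimal sketch; ``len(sf)`` is equivalent to ``sf.num_rows()``):
>>> len(graphlab.SFrame({'id': [1, 2, 3]}))  # hypothetical data
3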
"""
return self.num_rows()
def __copy__(self):
"""
Returns a shallow copy of the sframe.
"""
return self.select_columns(self.column_names())
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
def _row_selector(self, other):
"""
Where other is an SArray of the same length as the current SFrame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
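Example (a minimal sketch; normally invoked via ``sf[selector]``, and
assuming SArray's vectorized comparison operators):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})  # hypothetical data
>>> sf[sf['id'] > 1]
+----+
| id |
+----+
| 2 |
| 3 |
+----+
[2 rows x 1 columns]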
"""
if type(other) is SArray:
if len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
def dtype(self):
"""
The type of each column.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
column_types
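Examples
--------
A minimal sketch (hypothetical single-column SFrame):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})  # hypothetical data
>>> [t.__name__ for t in sf.dtype()]
['int']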
"""
return self.column_types()
def num_rows(self):
"""
The number of rows in this SFrame.
Returns
-------
out : int
Number of rows in the SFrame.
See Also
--------
num_columns
"""
return self.__proxy__.num_rows()
def num_cols(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_columns, num_rows
"""
return self.__proxy__.num_columns()
def num_columns(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_cols, num_rows
"""
return self.__proxy__.num_columns()
def column_names(self):
"""
The name of each column in the SFrame.
Returns
-------
out : list[string]
Column names of the SFrame.
See Also
--------
rename
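Examples
--------
A minimal sketch (hypothetical data):
>>> graphlab.SFrame({'id': [1, 2]}).column_names()
['id']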
"""
return self.__proxy__.column_names()
def column_types(self):
"""
The type of each column in the SFrame.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
dtype
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
The first n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the first n rows of the current SFrame
See Also
--------
tail, print_rows
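Examples
--------
A minimal sketch (hypothetical single-column SFrame):
>>> sf = graphlab.SFrame({'id': range(100)})  # hypothetical data
>>> sf.head(3)
+----+
| id |
+----+
| 0 |
| 1 |
| 2 |
+----+
[3 rows x 1 columns]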
"""
return SFrame(_proxy=self.__proxy__.head(n))
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when the returned object is large.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
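Examples
--------
A minimal sketch (assumes pandas is installed):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})  # hypothetical data
>>> df = sf.to_dataframe()
>>> list(df['id'])
[1, 2, 3]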
"""
assert HAS_PANDAS
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
df[column_name] = df[column_name].astype(self.column_types()[i])
return df
def tail(self, n=10):
"""
The last n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the last n rows of the current SFrame
See Also
--------
head, print_rows
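Examples
--------
A minimal sketch (hypothetical single-column SFrame):
>>> sf = graphlab.SFrame({'id': range(100)})  # hypothetical data
>>> sf.tail(2)
+----+
| id |
+----+
| 98 |
| 99 |
+----+
[2 rows x 1 columns]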
"""
return SFrame(_proxy=self.__proxy__.tail(n))
def apply(self, fn, dtype=None, seed=None):
"""
Transform each row to an :class:`~graphlab.SArray` according to a
specified function. Returns a new SArray of ``dtype`` where each element
is the result of ``fn(x)``, with ``x`` a single row of the SFrame
represented as a dictionary. ``fn`` should return exactly one value
which can be cast into type ``dtype``. If ``dtype`` is not specified,
the first 100 rows of the SFrame are used to make a guess of the target
data type.
Parameters
----------
fn : function
The function to transform each row of the SFrame. The return
type should be convertible to `dtype` if `dtype` is not None.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : dtype, optional
The dtype of the new SArray. If None, the first 100
elements of the array are used to guess the target
data type.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SArray
The SArray transformed by fn. Each element of the SArray is of
type ``dtype``
Examples
--------
Concatenate strings from several columns:
>>> sf = graphlab.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
'rating': [4, 5, 1]})
>>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
dtype: str
Rows: 3
['134', '235', '361']
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
double mean(const std::map<flexible_type, flexible_type>& dict) {
double sum = 0.0;
for (const auto& kv: dict) sum += (double)kv.second;
return sum / dict.size();
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(mean, "row");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sf = graphlab.SFrame({'x0': [1, 2, 3], 'x1': [2, 3, 1],
... 'x2': [3, 1, 2]})
>>> sf.apply(example.mean)
dtype: float
Rows: 3
[2.0,2.0,2.0]
"""
assert _is_callable(fn), "Input must be a function"
test_sf = self[:10]
dryrun = [fn(row) for row in test_sf]
if dtype is None:
dtype = SArray(dryrun).dtype()
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.apply')
nativefn = None
try:
import graphlab.extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed))
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types='auto', seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
The function that maps each of the sframe row into multiple rows,
returning List[List[...]]. All outputted rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = graphlab.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.itervalues()) for i in range(0, x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
"""
assert inspect.isfunction(fn), "Input must be a function"
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.flat_map')
# determine the column_types
if column_types == 'auto':
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
raise TypeError(
"Could not infer output column types from the first ten rows " +
"of the SFrame. Please use the 'column_types' parameter to " +
"set the types.")
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list
assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed))
def sample(self, fraction, seed=None):
"""
Sample the current SFrame's rows.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch. Must be between 0 and 1.
The number of rows returned is approximately the fraction times the
number of rows.
seed : int, optional
Seed for the random number generator used to sample.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if not seed:
seed = int(time.time())
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
_mt._get_metric_tracker().track('sframe.sample')
if (self.num_rows() == 0 or self.num_cols() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed))
def random_split(self, fraction, seed=None):
"""
Randomly split the rows of an SFrame into two SFrames. The first SFrame
contains *M* rows, sampled uniformly (without replacement) from the
original SFrame. *M* is approximately the fraction times the original
number of rows. The second SFrame contains the remaining rows of the
original SFrame.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch for the first returned
SFrame. Must be between 0 and 1.
seed : int, optional
Seed for the random number generator used to split.
Returns
-------
out : tuple [SFrame]
Two new SFrames.
Examples
--------
Suppose we have an SFrame with 1,024 rows and we want to randomly split
it into training and testing datasets with about a 90%/10% split.
>>> sf = graphlab.SFrame({'id': range(1024)})
>>> sf_train, sf_test = sf.random_split(.9, seed=5)
>>> print len(sf_train), len(sf_test)
922 102
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_cols() == 0):
return (SFrame(), SFrame())
if not seed:
seed = int(time.time())
# The server side requires this to be an int, so cast if we can
try:
seed = int(seed)
except (ValueError, TypeError):
raise ValueError('The \'seed\' parameter must be of type int.')
_mt._get_metric_tracker().track('sframe.random_split')
with cython_context():
proxy_pair = self.__proxy__.random_split(fraction, seed)
return (SFrame(data=[], _proxy=proxy_pair[0]), SFrame(data=[], _proxy=proxy_pair[1]))
def topk(self, column_name, k=10, reverse=False):
"""
Get the top k rows according to the given column. The result is
sorted by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = graphlab.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
_mt._get_metric_tracker().track('sframe.topk')
sf = self[self[column_name].topk_index(k, reverse)]
return sf.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, will try to infer the format from filename given. If file
name ends with 'csv' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
"""
_mt._get_metric_tracker().track('sframe.save', properties={'format':format})
if format is None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
else:
format = 'binary'
else:
if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
elif format != 'binary':
raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary'".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == 'binary':
self.__proxy__.save(url)
elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
else:
raise ValueError("Unsupported format: {}".format(format))
def select_column(self, key):
"""
Get a reference to the :class:`~graphlab.SArray` that corresponds with
the given key. Throws an exception if the key is something other than a
string or if the key is not found.
Parameters
----------
key : str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``key``.
See Also
--------
select_columns
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(key, str):
raise TypeError("Invalid key type: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(key))
def select_columns(self, keylist):
"""
Get an SFrame composed only of the columns referred to in the given list of
keys. Throws an exception if ANY of the keys are not in this SFrame or
if ``keylist`` is anything other than a list of strings.
Parameters
----------
keylist : list[str]
The list of column names.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``keylist`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not hasattr(keylist, '__iter__'):
raise TypeError("keylist must be an iterable")
if not all([isinstance(x, str) for x in keylist]):
raise TypeError("Invalid key type: must be str")
key_set = set(keylist)
if (len(key_set)) != len(keylist):
for key in key_set:
if keylist.count(key) > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(keylist))
def add_column(self, data, name=""):
"""
Add a column to this SFrame. The number of elements in the data given
must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self. If no
name is given, a default name is chosen.
Parameters
----------
data : SArray
The 'column' of data to add.
name : string, optional
The name of the column. If no name is given, a default name is
chosen.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = graphlab.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> sf.add_column(sa, name='species')
>>> sf
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
with cython_context():
self.__proxy__.add_column(data.__proxy__, name)
return self
def add_columns(self, data, namelist=None):
"""
Adds multiple columns to this SFrame. The number of elements in all
columns must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
namelist : list of string, optional
A list of column names. All names must be specified. ``namelist`` is
ignored if data is an SFrame.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = graphlab.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> sf.add_columns(sf2)
>>> sf
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
namelist = other.column_names()
my_columns = set(self.column_names())
for name in namelist:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not hasattr(datalist, '__iter__'):
raise TypeError("datalist must be an iterable")
if not hasattr(namelist, '__iter__'):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list : must all be str")
with cython_context():
self.__proxy__.add_columns([x.__proxy__ for x in datalist], namelist)
return self
def remove_column(self, name):
"""
Remove a column from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
name : string
The name of the column to remove.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> sf.remove_column('val')
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
colid = self.column_names().index(name)
with cython_context():
self.__proxy__.remove_column(colid)
return self
def remove_columns(self, column_names):
"""
Remove one or more columns from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> sf.remove_columns(['val1', 'val2'])
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError('Cannot find column %s' % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
for colid in reversed(deletion_indices):
with cython_context():
self.__proxy__.remove_column(colid)
return self
def swap_columns(self, column_1, column_2):
"""
Swap the columns with the given names. This operation modifies the
current SFrame in place and returns self.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.swap_columns('id', 'val')
>>> sf
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
"""
colnames = self.column_names()
colid_1 = colnames.index(column_1)
colid_2 = colnames.index(column_2)
with cython_context():
self.__proxy__.swap_columns(colid_1, colid_2)
return self
def rename(self, names):
"""
Rename the given columns. ``names`` is expected to be a dict specifying
the old and new names. This changes the names of the columns given as
the keys and replaces them with the names given as the values. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> sf.rename({'X1': 'name', 'X2':'address'})
>>> sf
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
all_columns = set(self.column_names())
for k in names:
if k not in all_columns:
raise ValueError('Cannot find column %s in the SFrame' % k)
with cython_context():
for k in names:
colid = self.column_names().index(k)
self.__proxy__.set_column_name(colid, names[k])
return self
def __getitem__(self, key):
"""
This method does things based on the type of `key`.
If `key` is:
* str
Calls `select_column` on `key`
* SArray
Performs a logical filter. Expects given SArray to be the same
length as all columns in current SFrame. Every row
corresponding with an entry in the given SArray that is
equivalent to False is filtered from the result.
* int
Returns a single row of the SFrame (the `key`th one) as a dictionary.
* slice
Returns an SFrame including only the sliced rows.
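Examples (a minimal sketch of each key type, with hypothetical data):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})  # hypothetical data
>>> sf['id']   # str key: the column as an SArray
dtype: int
Rows: 3
[1, 2, 3]
>>> sf[0]      # int key: a single row as a dict
{'id': 1}
>>> sf[1:]     # slice key: the remaining rows as a new SFrame
+----+
| id |
+----+
| 2 |
| 3 |
+----+
[2 rows x 1 columns]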
"""
if type(key) is SArray:
return self._row_selector(key)
elif type(key) is list:
return self.select_columns(key)
elif type(key) is str:
return self.select_column(key)
elif type(key) is int:
if key < 0:
key = len(self) + key
if key >= len(self):
raise IndexError("SFrame index out of range")
return list(SFrame(_proxy = self.__proxy__.copy_range(key, 1, key+1)))[0]
elif type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
# handle negative indices
if start < 0:
start = len(self) + start
if stop < 0:
stop = len(self) + stop
return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise TypeError("Invalid index type: must be SArray, list, or str")
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
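Example (a minimal sketch with hypothetical data):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})  # hypothetical data
>>> sf['flag'] = 1               # constant broadcast to a new column
>>> sf['double'] = sf['id'] * 2  # SArray assigned as a new column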
"""
if type(key) is list:
self.add_columns(value, key)
elif type(key) is str:
sa_value = None
if (type(value) is SArray):
sa_value = value
elif hasattr(value, '__iter__'): # wrap list, array... to sarray
sa_value = SArray(value)
else: # create an sarray of constant value
sa_value = SArray.from_const(value, self.num_rows())
# set new column
if key not in self.column_names():
with cython_context():
self.add_column(sa_value, key)
else:
# special case if replacing the only column.
# server would fail the replacement if the new column has different
# length than current one, which doesn't make sense if we are replacing
# the only column. To support this, we first take out the only column
# and then put it back if exception happens
single_column = (self.num_cols() == 1)
if (single_column):
tmpname = key
saved_column = self.select_column(key)
self.remove_column(key)
else:
# add the column to a unique column name.
tmpname = '__' + '-'.join(self.column_names())
try:
self.add_column(sa_value, tmpname)
except Exception as e:
if (single_column):
self.add_column(saved_column, key)
raise
if (not single_column):
# if add succeeded, remove the column name and rename tmpname->columnname.
self.swap_columns(key, tmpname)
self.remove_column(key)
self.rename({tmpname: key})
else:
raise TypeError('Cannot set column with key type ' + str(type(key)))
def __delitem__(self, key):
"""
Wrapper around remove_column.
"""
self.remove_column(key)
def __materialize__(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazy evaluated operations.
"""
with cython_context():
self.__proxy__.materialize()
def __is_materialized__(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__proxy__.is_materialized()
def __has_size__(self):
"""
Returns whether or not the size of the SFrame is known.
"""
return self.__proxy__.has_size()
def __iter__(self):
"""
Provides an iterator to the rows of the SFrame.
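Example (a minimal sketch; each row is yielded as a dict, hypothetical data):
>>> sf = graphlab.SFrame({'id': [1, 2]})
>>> for row in sf:
...     print row['id']
1
2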
"""
_mt._get_metric_tracker().track('sframe.__iter__')
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
column_names = self.column_names()
while True:
for j in ret:
yield dict(zip(column_names, j))
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = graphlab.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.append')
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow the column order of the two sframes to differ, so build a
# new sframe from "other" whose columns are in exactly the same order
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name)
# check column type
if my_column_types[i] != other_column.dtype():
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype()))
with cython_context():
processed_other_frame.__materialize__()
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__))
def groupby(self, key_columns, operations, *args):
"""
Perform a group on the key_columns followed by aggregations on the
columns listed in operations.
The operations parameter is a dictionary that indicates which
aggregation operators to use and which columns to use them on. The
available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
VAR. See :mod:`~graphlab.aggregate` for more detail on the aggregators.
Parameters
----------
key_columns : string | list[string]
Column(s) to group by. Key columns can be of any type other than
dictionary.
operations : dict, list
Dictionary of columns and aggregation operations. Each key is an
output column name and each value is an aggregator. This can also
be a list of aggregators, in which case column names will be
automatically assigned.
*args
All other remaining arguments will be interpreted in the same
way as the operations argument.
Returns
-------
out_sf : SFrame
A new SFrame, with a column for each groupby column and each
aggregation operation.
See Also
--------
aggregate
Examples
--------
Suppose we have an SFrame with movie ratings by many users.
>>> import graphlab.aggregate as agg
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| 25933 | 1663 | 4 |
| 25934 | 1663 | 4 |
| 25935 | 1663 | 4 |
| 25936 | 1663 | 5 |
| 25937 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Compute the number of occurrences of each user.
>>> user_count = sf.groupby(key_columns='user_id',
... operations={'count': agg.COUNT()})
>>> user_count
+---------+-------+
| user_id | count |
+---------+-------+
| 62361 | 1 |
| 30727 | 1 |
| 40111 | 1 |
| 50513 | 1 |
| 35140 | 1 |
| 42352 | 1 |
| 29667 | 1 |
| 46242 | 1 |
| 58310 | 1 |
| 64614 | 1 |
| ... | ... |
+---------+-------+
[9852 rows x 2 columns]
Compute the mean and standard deviation of ratings per user.
>>> user_rating_stats = sf.groupby(key_columns='user_id',
... operations={
... 'mean_rating': agg.MEAN('rating'),
... 'std_rating': agg.STD('rating')
... })
>>> user_rating_stats
+---------+-------------+------------+
| user_id | mean_rating | std_rating |
+---------+-------------+------------+
| 62361 | 5.0 | 0.0 |
| 30727 | 4.0 | 0.0 |
| 40111 | 2.0 | 0.0 |
| 50513 | 4.0 | 0.0 |
| 35140 | 4.0 | 0.0 |
| 42352 | 5.0 | 0.0 |
| 29667 | 4.0 | 0.0 |
| 46242 | 5.0 | 0.0 |
| 58310 | 2.0 | 0.0 |
| 64614 | 2.0 | 0.0 |
| ... | ... | ... |
+---------+-------------+------------+
[9852 rows x 3 columns]
Compute the movie with the minimum rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={
... 'worst_movies': agg.ARGMIN('rating','movie_id')
... })
>>> chosen_movies
+---------+-------------+
| user_id | worst_movies |
+---------+-------------+
| 62361 | 1663 |
| 30727 | 1663 |
| 40111 | 1663 |
| 50513 | 1663 |
| 35140 | 1663 |
| 42352 | 1663 |
| 29667 | 1663 |
| 46242 | 1663 |
| 58310 | 1663 |
| 64614 | 1663 |
| ... | ... |
+---------+-------------+
[9852 rows x 2 columns]
Compute the movie with the max rating per user and also the movie with
the maximum imdb-ranking per user.
>>> sf['imdb-ranking'] = sf['rating'] * 10
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
>>> chosen_movies
+---------+------------------+------------------------+
| user_id | max_rating_movie | max_imdb_ranking_movie |
+---------+------------------+------------------------+
| 62361 | 1663 | 16630 |
| 30727 | 1663 | 16630 |
| 40111 | 1663 | 16630 |
| 50513 | 1663 | 16630 |
| 35140 | 1663 | 16630 |
| 42352 | 1663 | 16630 |
| 29667 | 1663 | 16630 |
| 46242 | 1663 | 16630 |
| 58310 | 1663 | 16630 |
| 64614 | 1663 | 16630 |
| ... | ... | ... |
+---------+------------------+------------------------+
[9852 rows x 3 columns]
Compute the movie with the max rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={'best_movies': agg.ARGMAX('rating','movie_id')})
Compute the count, mean, and standard deviation of ratings per (user,
time), automatically assigning output column names.
>>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
>>> user_rating_stats = sf.groupby(['user_id', 'time'],
... [agg.COUNT(),
... agg.AVG('rating'),
... agg.STDV('rating')])
>>> user_rating_stats
+------+---------+-------+---------------+----------------+
| time | user_id | Count | Avg of rating | Stdv of rating |
+------+---------+-------+---------------+----------------+
| 2006 | 61285 | 1 | 4.0 | 0.0 |
| 2000 | 36078 | 1 | 4.0 | 0.0 |
| 2003 | 47158 | 1 | 3.0 | 0.0 |
| 2007 | 34446 | 1 | 3.0 | 0.0 |
| 2010 | 47990 | 1 | 3.0 | 0.0 |
| 2003 | 42120 | 1 | 5.0 | 0.0 |
| 2007 | 44940 | 1 | 4.0 | 0.0 |
| 2008 | 58240 | 1 | 4.0 | 0.0 |
| 2002 | 102 | 1 | 1.0 | 0.0 |
| 2009 | 52708 | 1 | 3.0 | 0.0 |
| ... | ... | ... | ... | ... |
+------+---------+-------+---------------+----------------+
[10000 rows x 5 columns]
The groupby function can take a variable length list of aggregation
specifiers so if we want the count and the 0.25 and 0.75 quantiles of
ratings:
>>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
>>> user_rating_stats
+------+---------+-------+------------------------+
| time | user_id | Count | rating_quantiles |
+------+---------+-------+------------------------+
| 2006 | 61285 | 1 | array('d', [4.0, 4.0]) |
| 2000 | 36078 | 1 | array('d', [4.0, 4.0]) |
| 2003 | 47158 | 1 | array('d', [3.0, 3.0]) |
| 2007 | 34446 | 1 | array('d', [3.0, 3.0]) |
| 2010 | 47990 | 1 | array('d', [3.0, 3.0]) |
| 2003 | 42120 | 1 | array('d', [5.0, 5.0]) |
| 2007 | 44940 | 1 | array('d', [4.0, 4.0]) |
| 2008 | 58240 | 1 | array('d', [4.0, 4.0]) |
| 2002 | 102 | 1 | array('d', [1.0, 1.0]) |
| 2009 | 52708 | 1 | array('d', [3.0, 3.0]) |
| ... | ... | ... | ... |
+------+---------+-------+------------------------+
[10000 rows x 4 columns]
To put all items a user rated into one list value by their star rating:
>>> user_rating_stats = sf.groupby(["user_id", "rating"],
... {"rated_movie_ids":agg.CONCAT("movie_id")})
>>> user_rating_stats
+--------+---------+----------------------+
| rating | user_id | rated_movie_ids |
+--------+---------+----------------------+
| 3 | 31434 | array('d', [1663.0]) |
| 5 | 25944 | array('d', [1663.0]) |
| 4 | 38827 | array('d', [1663.0]) |
| 4 | 51437 | array('d', [1663.0]) |
| 4 | 42549 | array('d', [1663.0]) |
| 4 | 49532 | array('d', [1663.0]) |
| 3 | 26124 | array('d', [1663.0]) |
| 4 | 46336 | array('d', [1663.0]) |
| 4 | 52133 | array('d', [1663.0]) |
| 5 | 62361 | array('d', [1663.0]) |
| ... | ... | ... |
+--------+---------+----------------------+
[9952 rows x 3 columns]
To put all items and rating of a given user together into a dictionary
value:
>>> user_rating_stats = sf.groupby("user_id",
... {"movie_rating":agg.CONCAT("movie_id", "rating")})
>>> user_rating_stats
+---------+--------------+
| user_id | movie_rating |
+---------+--------------+
| 62361 | {1663: 5} |
| 30727 | {1663: 4} |
| 40111 | {1663: 2} |
| 50513 | {1663: 4} |
| 35140 | {1663: 4} |
| 42352 | {1663: 5} |
| 29667 | {1663: 4} |
| 46242 | {1663: 5} |
| 58310 | {1663: 2} |
| 64614 | {1663: 2} |
| ... | ... |
+---------+--------------+
[9852 rows x 2 columns]
"""
# some basic checking first
# make sure key_columns is a list
if isinstance(key_columns, str):
key_columns = [key_columns]
# check that every column is a string, and is a valid column name
my_column_names = self.column_names()
key_columns_array = []
for column in key_columns:
if not isinstance(column, str):
raise TypeError("Column name must be a string")
if column not in my_column_names:
raise KeyError("Column " + column + " does not exist in SFrame")
if self[column].dtype() == dict:
raise TypeError("Cannot group on a dictionary column.")
key_columns_array.append(column)
group_output_columns = []
group_columns = []
group_ops = []
all_ops = [operations] + list(args)
for op_entry in all_ops:
# if it is not a dict, nor a list, it is just a single aggregator
# element (probably COUNT). wrap it in a list so we can reuse the
# list processing code
operation = op_entry
if not(isinstance(operation, list) or isinstance(operation, dict)):
operation = [operation]
if isinstance(operation, dict):
# now sweep the dict and add to group_columns and group_ops
for key in operation:
val = operation[key]
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for (col,output) in zip(column[0],key):
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [output]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [key]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [key]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition of output column: " + key)
elif isinstance(operation, list):
# we will be using automatically defined column names
for val in operation:
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for col in column[0]:
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [""]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition.")
# let's validate group_columns and group_ops are valid
for (cols, op) in zip(group_columns, group_ops):
for col in cols:
if not isinstance(col, str):
raise TypeError("Column name must be a string")
if not isinstance(op, str):
raise TypeError("Operation type not recognized.")
if op != graphlab.aggregate.COUNT()[0]:
for col in cols:
if col not in my_column_names:
raise KeyError("Column " + col + " does not exist in SFrame")
_mt._get_metric_tracker().track('sframe.groupby', properties={'operator':op})
with cython_context():
return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns,
group_output_columns, group_ops))
def join(self, right, on=None, how='inner'):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
Returns
-------
out : SFrame
Examples
--------
>>> animals = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = graphlab.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.join', properties={'type':how})
available_join_types = ['left','right','outer','inner']
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~graphlab.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.filter_by')
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not hasattr(values, '__iter__'):
values = [values]
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name)
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
existing_type = self.column_types()[self.column_names().index(column_name)]
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError("Type of given values does not match type of column '" +
column_name + "' in SFrame.")
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
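# Hedged sketch (pure Python, illustration only): the `exclude=True` branch
# above is an anti-join. Rows whose key finds no partner in the left join
# keep a missing sentinel id, and exactly those rows survive the filter.
# The same idea over a list of dicts:
def _anti_join_sketch(rows, column_name, values):
    # Keep only rows whose key is absent from the (deduplicated) value set,
    # mirroring "left join, then select rows whose id column is missing".
    value_set = set(values)
    return [row for row in rows if row[column_name] not in value_set]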
@_check_canvas_enabled
def show(self, columns=None, view=None, x=None, y=None):
"""
show(columns=None, view=None, x=None, y=None)
Visualize the SFrame with GraphLab Create :mod:`~graphlab.canvas`. This function
starts Canvas if it is not already running. If the SFrame has already been plotted,
this function will update the plot.
Parameters
----------
columns : list of str, optional
The columns of this SFrame to show in the SFrame view. In an
interactive browser target of Canvas, the columns will be selectable
and reorderable through the UI as well. If not specified, the
SFrame view will use all columns of the SFrame.
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on which Canvas target is set).
- 'Table': Show a scrollable, tabular view of the data in the
SFrame.
- 'Summary': Show a list of columns with some summary statistics
and plots for each column.
- 'Scatter Plot': Show a scatter plot of two numeric columns.
- 'Heat Map': Show a heat map of two numeric columns.
- 'Bar Chart': Show a bar chart of one numeric and one categorical
column.
- 'Line Chart': Show a line chart of one numeric and one
categorical column.
x : str, optional
The column to use for the X axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the columns
in this SFrame. For Scatter Plot and Heat Map, the column must be
numeric (int or float). If not set, defaults to the first available
valid column.
y : str, optional
The column to use for the Y axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the numeric
columns in this SFrame. If not set, defaults to the second
available numeric column.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view.
See Also
--------
canvas
Examples
--------
Suppose 'sf' is an SFrame, we can view it in GraphLab Canvas using:
>>> sf.show()
To choose a column filter (applied to all SFrame views):
>>> sf.show(columns=["Foo", "Bar"]) # use only columns 'Foo' and 'Bar'
>>> sf.show(columns=sf.column_names()[3:7]) # use columns 3-7
To choose a specific view of the SFrame:
>>> sf.show(view="Summary")
>>> sf.show(view="Table")
>>> sf.show(view="Bar Chart", x="col1", y="col2")
>>> sf.show(view="Line Chart", x="col1", y="col2")
>>> sf.show(view="Scatter Plot", x="col1", y="col2")
>>> sf.show(view="Heat Map", x="col1", y="col2")
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sframe
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sframe.SFrameView(self, params={
'view': view,
'columns': columns,
'x': x,
'y': y
}))
def pack_columns(self, columns=None, column_prefix=None, dtype=list,
fill_na=None, remove_prefix=True, new_column_name=None):
"""
Pack two or more columns of the current SFrame into one single
column. The result is a new SFrame with the unaffected columns from the
original SFrame plus the newly created column.
The list of columns that are packed is chosen through either the
``columns`` or ``column_prefix`` parameter. Only one of the parameters
is allowed to be provided. ``columns`` explicitly specifies the list of
columns to pack, while ``column_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
columns : list[str], optional
A list of column names to be packed. There must be at least
two columns to pack. If omitted and `column_prefix` is not
specified, all columns from current SFrame are packed. This
parameter is mutually exclusive with the `column_prefix` parameter.
column_prefix : str, optional
Pack all columns with the given `column_prefix`.
This parameter is mutually exclusive with the `columns` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- There must be at least two columns to pack.
- If packing to dictionary, a missing key is always dropped. Missing
values are dropped unless `fill_na` is provided, in which case they
are replaced by the `fill_na` value. If packing to list or array,
missing values are kept; if `fill_na` is provided, they are replaced
with the `fill_na` value.
Examples
--------
Suppose 'sf' is an SFrame that maintains business category
information:
>>> sf = graphlab.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_prefix='category')
+----------+--------------------+
| business | X2 |
+----------+--------------------+
| 1 | [1, 1, None, 1] |
| 2 | [None, 1, 1, 1] |
| 3 | [1, None, 1, None] |
| 4 | [None, 1, None, 1] |
+----------+--------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... new_column_name='category')
+----------+--------------------------------+
| business | category |
+----------+--------------------------------+
| 1 | {'food': 1, 'shop': 1, 're ... |
| 2 | {'food': 1, 'shop': 1, 'se ... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'food': 1, 'shop': 1} |
+----------+--------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... remove_prefix=False)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | {'category.retail': 1, 'ca ... |
| 2 | {'category.food': 1, 'cate ... |
| 3 | {'category.retail': 1, 'ca ... |
| 4 | {'category.food': 1, 'cate ... |
+----------+--------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
>>> sf.pack_columns(columns=['business', 'category.retail',
... 'category.food', 'category.service',
... 'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> sf.pack_columns(column_prefix="category", dtype=array.array,
... fill_na=0)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | array('d', [1.0, 1.0, 0.0, ... |
| 2 | array('d', [0.0, 1.0, 1.0, ... |
| 3 | array('d', [1.0, 0.0, 1.0, ... |
| 4 | array('d', [0.0, 1.0, 0.0, ... |
+----------+--------------------------------+
[4 rows x 2 columns]
"""
if columns is not None and column_prefix is not None:
raise ValueError("'columns' and 'column_prefix' cannot be given at the same time.")
if new_column_name is None and column_prefix is not None:
new_column_name = column_prefix
if column_prefix is not None:
if type(column_prefix) != str:
raise TypeError("'column_prefix' must be a string")
columns = [name for name in self.column_names() if name.startswith(column_prefix)]
if len(columns) == 0:
raise ValueError("There is no column that starts with prefix '" + column_prefix + "'")
elif columns is None:
columns = self.column_names()
else:
if not hasattr(columns, '__iter__'):
raise TypeError("columns must be an iterable type")
column_names = set(self.column_names())
for column in columns:
if (column not in column_names):
raise ValueError("Current SFrame has no column called '" + str(column) + "'.")
# check duplicate names
if len(set(columns)) != len(columns):
raise ValueError("There is duplicate column names in columns parameter")
if (len(columns) <= 1):
raise ValueError("Please provide at least two columns to pack")
if (dtype not in (dict, list, array.array)):
raise ValueError("Resulting dtype has to be one of dict/array.array/list type")
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na != None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all columns have to be numeric type
for column in columns:
if self[column].dtype() not in (int, float):
raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type")
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
if (dtype == dict) and (column_prefix != None) and (remove_prefix == True):
size_prefix = len(column_prefix)
first_char = set([c[size_prefix:size_prefix+1] for c in columns])
if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']):
dict_keys = [name[size_prefix+1:] for name in columns]
else:
dict_keys = [name[size_prefix:] for name in columns]
else:
dict_keys = columns
rest_columns = [name for name in self.column_names() if name not in columns]
if new_column_name is not None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
raise KeyError("Current SFrame already contains a column named " + new_column_name)
else:
new_column_name = ""
_mt._get_metric_tracker().track('sframe.pack_columns')
ret_sa = None
with cython_context():
ret_sa = SArray(_proxy=self.__proxy__.pack_columns(columns, dict_keys, dtype, fill_na))
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name)
return new_sf
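# Hedged sketch (illustration only, not used by pack_columns): how the
# dictionary keys are derived from a common column prefix above. If every
# column continues the prefix with '.', '-' or '_', that separator is
# stripped along with the prefix.
def _dict_keys_sketch(columns, column_prefix):
    size_prefix = len(column_prefix)
    first_chars = set(c[size_prefix:size_prefix + 1] for c in columns)
    if len(first_chars) == 1 and first_chars.pop() in ['.', '-', '_']:
        return [name[size_prefix + 1:] for name in columns]
    return [name[size_prefix:] for name in columns]
# e.g. _dict_keys_sketch(['category.retail', 'category.food'], 'category')
# -> ['retail', 'food']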
def split_datetime(self, expand_column, column_name_prefix=None, limit=None, tzone=False):
"""
Splits a datetime column of an SFrame into multiple columns, with each
datetime element in a separate column. Returns a new SFrame with the
expanded column replaced with a list of new columns. The expanded column
must be of datetime type.
For more details regarding name generation and other behavior, refer
to :py:func:`graphlab.SArray.split_datetime()`.
Parameters
----------
expand_column : str
Name of the column to expand.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit : list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone : bool, optional
Whether to include the timezone column. Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains the rest of the columns from the original
SFrame, with the given column replaced by a collection of expanded
columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+---------------------------------------------------+
| id | submission                                        |
+----+---------------------------------------------------+
| 1  | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))  |
| 2  | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))  |
+----+---------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
"""
if expand_column not in self.column_names():
raise KeyError("column '" + expand_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = expand_column
new_sf = self[expand_column].split_datetime(column_name_prefix, limit, tzone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != expand_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.split_datetime')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def unpack(self, unpack_column, column_name_prefix=None, column_types=None,
na_value=None, limit=None):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
For more details regarding name generation, missing value handling, and
other behavior, refer to the SArray version of
:py:func:`~graphlab.SArray.unpack()`.
Parameters
----------
unpack_column : str
Name of the unpacked column
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
If not provided, column types are automatically inferred from first
100 rows. For array type, default column types are float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
Control unpacking only a subset of list/array/dict value. For
dictionary SArray, `limit` is a list of dictionary keys to restrict.
For list/array SArray, `limit` is a list of integers that are
indexes into the list/array value.
Returns
-------
out : SFrame
A new SFrame that contains the rest of the columns from the original
SFrame, with the given column replaced by a collection of unpacked
columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To omit the prefix in the generated column names:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To unpack only a subset of keys:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1  | None |
| 2  | 2    |
| 3  | 2    |
+----+------+
[3 rows x 2 columns]
To unpack an array column:
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+----+-----------------------------+
| id | friends |
+----+-----------------------------+
| 1 | array('d', [1.0, 2.0, 3.0]) |
| 2 | array('d', [2.0, 3.0, 4.0]) |
| 3 | array('d', [3.0, 4.0, 5.0]) |
+----+-----------------------------+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
"""
if unpack_column not in self.column_names():
raise KeyError("column '" + unpack_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = unpack_column
new_sf = self[unpack_column].unpack(column_name_prefix, column_types, na_value, limit)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != unpack_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.unpack')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
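# Hedged sketch (illustration only): the collision handling shared by
# split_datetime and unpack above keeps appending ".1" to every generated
# name until none of them clashes with the remaining columns.
def _dedup_names_sketch(new_names, rest_columns):
    rest = set(rest_columns)
    while rest.intersection(new_names):
        new_names = [name + ".1" for name in new_names]
    return new_names
# e.g. _dedup_names_sketch(['wc.a'], ['wc.a']) -> ['wc.a.1']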
def stack(self, column_name, new_column_name=None, drop_na=False):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is array or list type, one new column is created as a
result of stacking. Each row holds one element of the array or list
value, with the rest of the columns from the same original row repeated.
The new SFrame includes the newly created column and all columns other
than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If the original column is list/array type,
new_column_name must be a string. If the original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = graphlab.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
To drop that row, set drop_na=True in the parameters to stack.
Suppose 'sf' is an SFrame that contains a topic and its friends,
where the 'friends' column is of list type. Stacking on the 'friends'
column creates one output row per topic/friend pair:
>>> sf = graphlab.SFrame({'topic':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+-------+------------------+
| topic | friends |
+-------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
+-------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+-------+--------+
| topic | friend |
+-------+--------+
|   1   |   2    |
|   1   |   3    |
|   1   |   4    |
|   2   |   5    |
|   2   |   6    |
|   3   |   4    |
|   3   |   5    |
|   3   |   10   |
|   3   |  None  |
+-------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype()
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
if (new_column_name != None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
raise ValueError("Cannot infer column type because there are not enough rows to infer values")
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val != None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
_mt._get_metric_tracker().track('sframe.stack')
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name, new_column_name, new_column_type, drop_na))
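# Hedged sketch (pure Python, illustration only): what stacking a dict
# column does to one row. Every key/value pair becomes its own output row,
# with the other columns of the original row repeated.
def _stack_dict_row_sketch(topic, words):
    # words is a dict such as {'a': 3, 'cat': 2}; an empty dict yields a
    # single row of missing values, matching the docstring example above.
    if not words:
        return [(topic, None, None)]
    return [(topic, word, count) for word, count in words.items()]
# e.g. _stack_dict_row_sketch(1, {'a': 3, 'cat': 2})
# -> [(1, 'a', 3), (1, 'cat', 2)]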
def unstack(self, column, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column`` is a numeric column, the result will be of
array.array type. If ``column`` is a non-numeric column, the new column
will be of list type. If ``column`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column : str | [str, str]
The column(s) to be concatenated.
If str, the collapsed column type is either array or list.
If [str, str], the collapsed column type is dict.
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~graphlab.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = graphlab.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = graphlab.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='friends')
+------+-----------------------------+
| user | friends |
+------+-----------------------------+
| 3 | array('d', [5.0]) |
| 1 | array('d', [2.0, 4.0, 3.0]) |
| 2 | array('d', [5.0, 6.0, 4.0]) |
| 4 | array('d', [2.0, 3.0]) |
+------+-----------------------------+
[4 rows x 2 columns]
"""
if (type(column) != str and len(column) != 2):
raise TypeError("'column' parameter has to be either a string or a list of two strings.")
_mt._get_metric_tracker().track('sframe.unstack')
with cython_context():
if type(column) == str:
key_columns = [i for i in self.column_names() if i != column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name : graphlab.aggregate.CONCAT(column)})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column))
elif len(column) == 2:
key_columns = [i for i in self.column_names() if i not in column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name:graphlab.aggregate.CONCAT(column[0], column[1])})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column[0], column[1]))
def unique(self):
"""
Remove duplicate rows of the SFrame. Will not necessarily preserve the
order of the given SFrame in the new SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the unique rows of the current SFrame.
Raises
------
TypeError
If any column in the SFrame is a dictionary type.
See Also
--------
SArray.unique
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
>>> sf
+----+-------+
| id | value |
+----+-------+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
| 3 | 3 |
| 4 | 4 |
+----+-------+
[5 rows x 2 columns]
>>> sf.unique()
+----+-------+
| id | value |
+----+-------+
| 2 | 2 |
| 4 | 4 |
| 3 | 3 |
| 1 | 1 |
+----+-------+
[4 rows x 2 columns]
"""
return self.groupby(self.column_names(),{})
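# Hedged sketch (illustration only): unique() above is just a group-by on
# every column with no aggregators, i.e. row-level deduplication. The same
# idea in pure Python over a list of row tuples:
def _unique_rows_sketch(rows):
    # set() removes duplicate tuples; like unique(), order is not preserved.
    return list(set(rows))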
def sort(self, sort_columns, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
Only columns of type str, int, float, or datetime.datetime can be
sorted.
Parameters
----------
sort_columns : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result is sorted first by the
first column, then by the second column, and so on. All columns are
sorted in the same order as governed by the `ascending` parameter.
To control the sort order for each column individually,
`sort_columns` must be a list of (str, bool) pairs. In that case,
the first value is the column name and the second value is a
boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = graphlab.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate sort_columns
if (type(sort_columns) == str):
sort_column_names = [sort_columns]
elif (type(sort_columns) == list):
if (len(sort_columns) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in sort_columns])
if (len(first_param_types) != 1):
raise ValueError("sort_columns element are not of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in sort_columns]
sort_column_orders = [i[1] for i in sort_columns]
elif(first_param_type == str):
sort_column_names = sort_columns
else:
raise TypeError("sort_columns type is not supported")
else:
raise TypeError("sort_columns type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
if (self[column].dtype() not in (str, int, float, datetime.datetime)):
raise TypeError("Only columns of type (str, int, float, datetime.datetime) can be sorted")
_mt._get_metric_tracker().track('sframe.sort')
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders))
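# Hedged sketch (illustration only): how the three accepted sort_columns
# forms above normalize into parallel name/order lists.
def _normalize_sort_sketch(sort_columns, ascending=True):
    if isinstance(sort_columns, str):
        return [sort_columns], [ascending]
    if sort_columns and isinstance(sort_columns[0], tuple):
        return ([c for c, _ in sort_columns], [o for _, o in sort_columns])
    return list(sort_columns), [ascending] * len(sort_columns)
# e.g. _normalize_sort_sketch([('a', True), ('c', False)])
# -> (['a', 'c'], [True, False])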
def dropna(self, columns=None, how='any'):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
>>> sf.dropna(any="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
>>> sf.dropna('a', any="all")
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna')
# An empty list is the server-side indicator for "use all columns", but a
# user explicitly passing an empty list most likely means "no columns", so
# dropping NA values would not be the expected behavior. This is a no-op,
# so let's not bother the server.
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False))
def dropna_split(self, columns=None, how='any'):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~graphlab.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~graphlab.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna_split')
# An empty list is the server-side indicator for "use all columns", but a
# user explicitly passing an empty list most likely means "no columns", so
# dropping NA values would not be the expected behavior. This is a no-op,
# so let's not bother the server.
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
# the server by an empty list (to avoid sending all the column
# in this case, since it is the most common)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ['any','all']:
raise ValueError("Must specify 'any' or 'all'")
if how == 'all':
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column) is not str:
raise TypeError("Must give column name as a str")
ret = self[self.column_names()]
ret[column] = ret[column].fillna(value)
return ret
def add_row_number(self, column_name='id', start=0):
"""
Returns a new SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
Returns
-------
out : SFrame
The new SFrame with the new row number column prepended.
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.add_row_number')
if type(column_name) is not str:
raise TypeError("Must give column_name as strs")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name)
new_sf.add_columns(self)
return new_sf
@property
def shape(self):
"""
The shape of the SFrame, in a tuple. The first entry is the number of
rows, the second is the number of columns.
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.shape
(3, 2)
"""
return (self.num_rows(), self.num_cols())
@property
def __proxy__(self):
return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._proxy = value
| agpl-3.0 |
petebachant/scipy | scipy/signal/windows.py | 32 | 53971 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
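# Hedged sketch (doctest-style, not part of scipy's API): the sym/odd
# bookkeeping used by triang and the windows below is equivalent to
# computing a symmetric window one point longer and dropping the last
# sample. Assuming numpy is imported as np:
#
# >>> np.allclose(triang(8, sym=False), triang(9, sym=True)[:-1])
# True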
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
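# Hedged sketch (illustration only): nuttall, blackmanharris and flattop
# above are all instances of one generalized cosine-sum window for the
# symmetric case; only the coefficient list `a` differs.
def _general_cosine_sketch(M, a):
    n = np.arange(0, M)
    fac = n * 2 * np.pi / (M - 1.0)
    # Alternating-sign cosine series: a[0] - a[1]*cos(fac) + a[2]*cos(2*fac) - ...
    return sum(((-1) ** k) * a_k * np.cos(k * fac) for k, a_k in enumerate(a))
# e.g. _general_cosine_sketch(51, [0.35875, 0.48829, 0.14128, 0.01168])
# reproduces blackmanharris(51) in the symmetric case.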
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
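# A minimal runnable sketch (the ``_demo_hann_periodic`` helper below is
# illustrative, not part of this module): the ``sym=False`` handling shared
# by the window functions above amounts to computing a symmetric window of
# length M + 1 and dropping the last sample.
def _demo_hann_periodic():
    sym_w = hann(9, sym=True)    # symmetric window of length M + 1
    per_w = hann(8, sym=False)   # periodic window of length M
    assert np.allclose(per_w, sym_w[:-1])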
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
if not sym and not odd:
w = w[:-1]
return w
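# Illustrative check (``_demo_tukey_limits`` is a hypothetical helper, not
# part of this module) of the limiting cases named in the ``alpha``
# description above: alpha <= 0 gives a rectangular window and alpha >= 1
# gives a Hann window.
def _demo_tukey_limits():
    assert np.allclose(tukey(51, alpha=0.0), np.ones(51))
    assert np.allclose(tukey(51, alpha=1.0), hann(51))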
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming window was named for R. W. Hamming, an associate of J. W.
Tukey, and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser window was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which maximizes the energy in
the main lobe of the window relative to the total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, so the number of samples needs to be
large enough to sample the increasingly narrow spike; otherwise NaNs will
be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
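# Rough numerical check (``_demo_kaiser_vs_hann`` is a hypothetical helper,
# not part of this module) of the beta table above: beta = 6 should yield a
# window close to a Hann window. The 0.1 tolerance is deliberately loose;
# the two shapes agree only to within a few percent.
def _demo_kaiser_vs_hann():
    diff = np.abs(kaiser(51, beta=6) - hann(51))
    assert diff.max() < 0.1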
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so windows
with power-of-two `M` are the fastest to generate and prime-length `M`
are the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
.. [1] S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
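# Sketch (``_demo_exponential_tau`` is a hypothetical helper, not part of
# this module) verifying the ``tau`` formula quoted above: with center = 0
# and tau = -(M - 1) / ln(x), the window decays to exactly x at its last
# sample.
def _demo_exponential_tau():
    M, x = 51, 0.01
    tau = -(M - 1) / np.log(x)
    w = exponential(M, center=0, tau=tau, sym=False)
    assert np.isclose(w[-1], x)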
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an FFT (see also fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation),
exponential (needs decay scale), tukey (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
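# Sketch (``_demo_get_window_dispatch`` is a hypothetical helper, not part
# of this module) of the three calling conventions documented above: a
# (name, param) tuple, a bare float interpreted as a Kaiser beta, and the
# underlying window function called directly with sym = not fftbins.
def _demo_get_window_dispatch():
    w_tuple = get_window(('kaiser', 4.0), 9)
    w_float = get_window(4.0, 9)
    w_direct = kaiser(9, 4.0, sym=False)
    assert np.allclose(w_tuple, w_float)
    assert np.allclose(w_tuple, w_direct)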
| bsd-3-clause |
arahuja/scikit-learn | sklearn/metrics/pairwise.py | 13 | 41710 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
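# Sketch (``_demo_check_pairwise_pointer`` is a hypothetical helper, not
# part of sklearn) of the pointer behaviour described above: with Y=None
# the same validated array object is returned twice, while an explicit Y
# is validated separately.
def _demo_check_pairwise_pointer():
    X = np.arange(6, dtype=np.float64).reshape(3, 2)
    X2, Y2 = check_pairwise_arrays(X, None)
    assert Y2 is X2                     # Y is a pointer to X, not a copy
    X3, Y3 = check_pairwise_arrays(X, X.copy())
    assert Y3 is not X3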
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
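# Sketch (``_demo_euclidean_expansion`` is a hypothetical helper, not part
# of sklearn) checking the dot-product expansion quoted in the docstring
# against a brute-force pairwise computation.
def _demo_euclidean_expansion():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(5, 3)
    brute = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    assert np.allclose(euclidean_distances(X, Y), brute)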
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.inf)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
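# Sketch (``_demo_argmin_min_equivalence`` is a hypothetical helper, not
# part of sklearn) of the equivalence stated in the docstring: the chunked
# computation matches a naive argmin/min over the full distance matrix.
def _demo_argmin_min_equivalence():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(6, 3), rng.rand(8, 3)
    argmin, dist_min = pairwise_distances_argmin_min(X, Y)
    D = pairwise_distances(X, Y)
    assert np.array_equal(argmin, D.argmin(axis=1))
    assert np.allclose(dist_min, D.min(axis=1))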
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
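# Sketch (``_demo_cosine_distance_identity`` is a hypothetical helper, not
# part of sklearn): cosine distance is one minus the normalized dot
# product, checked here against an explicit computation.
def _demo_cosine_distance_identity():
    x = np.array([[1.0, 0.0, 2.0]])
    y = np.array([[2.0, 1.0, 0.0]])
    expected = 1.0 - np.dot(x, y.T) / (np.linalg.norm(x) * np.linalg.norm(y))
    assert np.allclose(cosine_distances(x, y), expected)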
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared euclidean
distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
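# Numerical sketch (``_demo_paired_cosine_note`` is a hypothetical helper,
# not part of sklearn) of the note above: on unit-normalized rows the
# cosine distance equals half the squared euclidean distance.
def _demo_paired_cosine_note():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(5, 4))
    Y = normalize(rng.rand(5, 4))
    lhs = paired_cosine_distances(X, Y)
    rhs = 0.5 * paired_euclidean_distances(X, Y) ** 2
    assert np.allclose(lhs, rhs)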
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
If None, defaults to 1.0 / n_features.
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
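# Element-wise sketch (``_demo_polynomial_kernel_formula`` is a
# hypothetical helper, not part of sklearn) of the formula quoted above:
# K(X, Y) = (gamma <X, Y> + coef0) ** degree.
def _demo_polynomial_kernel_formula():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(3, 4), rng.rand(2, 4)
    gamma, coef0, degree = 0.5, 1.0, 3
    K = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
    assert np.allclose(K, (gamma * np.dot(X, Y.T) + coef0) ** degree)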
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features.
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=True)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
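# Sketch (``_demo_chi2_relationship`` is a hypothetical helper, not part
# of sklearn): the exponentiated kernel is exp(gamma * additive kernel),
# the additive form already carrying the minus sign (see its docstring).
def _demo_chi2_relationship():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)   # non-negative, as the kernel requires
    gamma = 0.7
    assert np.allclose(chi2_kernel(X, gamma=gamma),
                       np.exp(gamma * additive_chi2_kernel(X)))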
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
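# Usage sketch (hypothetical helper): euclidean distance between (0, 0) and
# (3, 4) is 5, and the returned matrix is symmetric with a zero diagonal.
# For the n_jobs semantics documented above, n_jobs=-2 resolves to
# max(cpu_count() + 1 - 2, 1), i.e. all CPUs but one.
def _pairwise_distances_demo():
    X = np.array([[0.0, 0.0], [3.0, 4.0]])
    D = pairwise_distances(X, metric='euclidean')
    assert np.allclose(D, [[0.0, 5.0], [5.0, 0.0]])
    return D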
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
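# Sketch (hypothetical helper) of the filtering that pairwise_kernels
# performs when filter_params=True: only kwargs listed in KERNEL_PARAMS for
# the chosen kernel survive; everything else is dropped.
def _filter_kernel_params_demo():
    kwds = {'gamma': 0.5, 'degree': 3}
    kept = dict((k, kwds[k]) for k in kwds if k in KERNEL_PARAMS['rbf'])
    assert kept == {'gamma': 0.5}
    return kept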
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
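# Usage sketch (hypothetical helper): for metric='linear' the kernel matrix
# is simply X.dot(Y.T), so an orthonormal X yields the identity matrix.
def _pairwise_kernels_demo():
    X = np.eye(2)
    K = pairwise_kernels(X, metric='linear')
    assert np.allclose(K, X.dot(X.T))
    return K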
| bsd-3-clause |
kdebrab/pandas | pandas/core/base.py | 1 | 40146 | """
Base and utility classes for pandas objects.
"""
import warnings
import textwrap
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.core.dtypes.common import (
is_datetimelike,
is_object_dtype,
is_list_like,
is_scalar,
is_extension_type,
is_extension_array_dtype)
from pandas.util._validators import validate_bool_kwarg
from pandas.errors import AbstractMethodError
from pandas.core import common as com, algorithms
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
from pandas.compat import PYPY
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.accessor import DirNamesMixin
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
        Return a string representation for a particular object.
        Invoked by str(df) in both py2/py3.
        Yields a bytestring in py2, a unicode string in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
        Yields a bytestring in py2, a unicode string in py3.
"""
return str(self)
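# Illustrative sketch (hypothetical class, not part of pandas): the mixin's
# contract is that a subclass implements only __unicode__; str(), bytes()
# and repr() are all derived from it by the methods above.
class _GreetingDemo(StringMixin):
    def __unicode__(self):
        return u"hello"
# str(_GreetingDemo()) -> 'hello' on py3; bytes(_GreetingDemo()) encodes it
# with the "display.encoding" option via __bytes__.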
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
        Invoked by unicode(obj) in py2 only. Yields a unicode string in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
    call to `self._freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (getattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
getattr(self, key, None) is not None)):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
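# Sketch of the freeze behaviour above (hypothetical subclass): attributes
# created before _freeze() can still be reassigned, but brand-new attributes
# raise AttributeError afterwards.
class _FrozenDemo(NoNewAttributesMixin):
    def __init__(self):
        self.allowed = 1   # created while still unfrozen
        self._freeze()
# f = _FrozenDemo()
# f.allowed = 2            # ok: getattr(self, 'allowed') is not None
# f.brand_new = 3          # raises AttributeError("You cannot add ...")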
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
    mixin implementing the selection & aggregation interface on a group-like
    object. Sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.all: 'all',
np.any: 'any',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
}
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
        None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif isinstance(obj, ABCDataFrame) and \
k not in obj.columns:
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries)
for r in compat.itervalues(result))
def is_any_frame():
            # return a boolean if we have *any* nested frames
return any(isinstance(r, ABCDataFrame)
for r in compat.itervalues(result))
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
        elif is_list_like(arg) and not isinstance(arg, compat.string_types):
            # we require a list, but not a 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com._get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
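# Standalone illustration (hypothetical helper, relying only on the tables
# defined above): builtins are swapped for their numpy equivalents, and
# recognised numpy reductions map to cython op names; unknown callables
# fall through with no cython path.
def _lookup_tables_demo():
    assert SelectionMixin._builtin_table[sum] is np.sum
    assert SelectionMixin._cython_table[np.mean] == 'mean'
    assert SelectionMixin._cython_table.get(len) is None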
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.values._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def max(self):
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
        return the int position of the maximum value
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
        return the int position of the minimum value
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com._maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return iter(self.tolist())
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isna(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _map_values(self, mapper, na_action=None):
"""An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
                # Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.astype(object)
values = getattr(values, 'values', values)
if na_action == 'ignore':
def map_f(values, f):
return lib.map_infer_mask(values, f,
isna(values).view(np.uint8))
else:
map_f = lib.map_infer
# mapper is a function
new_values = map_f(values, mapper)
return new_values
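    # Behaviour sketch for the dict fastpath above, from the user-facing
    # map (assumed standard pandas semantics; REPL example, not executed):
    #
    #   >>> s = pd.Series(['cat', 'dog', 'cat'])
    #   >>> s.map({'cat': 0, 'dog': 1})
    #   0    0
    #   1    1
    #   2    0
    #   dtype: int64
    #
    # Keys missing from the dict align to NaN rather than raising, because
    # the lookup goes through get_indexer/take_1d above.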
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.count: number of non-NA elements in a DataFrame
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
        Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting unique
        occurrences of values, divide the index into the specified
        number of half-open bins.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.values)
return v
@Substitution(
values='', order='', size_hint='',
sort=textwrap.dedent("""\
sort : boolean, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""))
@Appender(algorithms._shared_docs['factorize'])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
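    # Usage sketch for factorize (assumed standard pandas behaviour; REPL
    # example, not executed here):
    #
    #   >>> labels, uniques = pd.factorize(['b', 'a', 'b'])
    #   >>> labels
    #   array([0, 1, 0])
    #   >>> uniques
    #   array(['b', 'a'], dtype=object)
    #
    # With sort=True the uniques come back ordered and the labels are
    # renumbered to match.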
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = pd.Categorical(['apple', 'bread', 'bread',
'cheese', 'milk'], ordered=True)
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'], side='right')
array([3])
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
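    # Behaviour sketch (assumed standard pandas semantics; REPL example):
    # keep='first' marks later repeats as duplicates, so drop_duplicates
    # retains the first occurrence.
    #
    #   >>> s = pd.Series([1, 2, 1])
    #   >>> s.duplicated(keep='first').tolist()
    #   [False, False, True]
    #   >>> s.drop_duplicates().tolist()
    #   [1, 2]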
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/frame/test_axis_select_reindex.py | 2 | 43348 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isna)
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas.errors import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
assert obj.index.name == 'first'
assert obj.columns.name == 'second'
assert list(df.columns) == ['d', 'e', 'f']
pytest.raises(ValueError, df.drop, ['g'])
pytest.raises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
# GH 16398
dropped = df.drop([], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
        assert isinstance(df.columns, MultiIndex)
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.loc[[1, 2], :])
pytest.raises(ValueError, simple.drop, 5)
pytest.raises(ValueError, simple.drop, 'C', 1)
pytest.raises(ValueError, simple.drop, [1, 5])
pytest.raises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.loc[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's (GH12392)
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.drop('a')
res2 = df.drop(index='a')
tm.assert_frame_equal(res1, res2)
res1 = df.drop('d', 1)
res2 = df.drop(columns='d')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(labels='e', axis=1)
res2 = df.drop(columns='e')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0)
res2 = df.drop(index=['a'])
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
res2 = df.drop(index=['a'], columns=['d'])
tm.assert_frame_equal(res1, res2)
with pytest.raises(ValueError):
df.drop(labels='a', index='b')
with pytest.raises(ValueError):
df.drop(labels='a', columns='b')
with pytest.raises(ValueError):
df.drop(axis=1)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(newFrame):
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(nonContigFrame):
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
assert newFrame.index is self.frame.index
# length zero
newFrame = self.frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(self.frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
assert len(newFrame.index) == len(self.frame.index)
assert len(newFrame.columns) == len(self.frame.columns)
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
tm.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
assert result is not self.frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
assert df.index.name == 'iname'
df = df.reindex(Index(np.arange(10), name='tmpname'))
assert df.index.name == 'tmpname'
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
assert df.columns.name == 'iname'
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
assert smaller['A'].dtype == np.int64
bigger = smaller.reindex(self.intframe.index)
assert bigger['A'].dtype == np.float64
smaller = self.intframe.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
tm.assert_series_equal(new_frame['B'], self.frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
new_frame = self.frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method='ffill')
expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method='bfill')
expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
pytest.raises(ValueError, df.reindex, index=list(range(len(df))))
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]},
index=[0, 1, 3])
result = df.reindex([0, 1, 3])
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis='index')
assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5],
"C": [np.nan, np.nan]})
with tm.assert_produces_warning(FutureWarning):
result = df.reindex([0, 1], ['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]})
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis=1)
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis='index')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis='columns')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis='columns')
with tm.assert_raises_regex(TypeError, 'Cannot specify all'):
df.reindex([0, 1], [0], ['A'])
# Mixing styles
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with tm.assert_raises_regex(TypeError, "Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
# Duplicates
with tm.assert_raises_regex(TypeError, "multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=['A'])
expected = pd.DataFrame({"A": [1, 2]})
assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
with tm.assert_produces_warning(FutureWarning) as m:
res1 = df.reindex(['b', 'a'], ['e', 'd'])
assert 'reindex' in str(m[0].message)
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_align(self):
af, bf = self.frame.align(self.frame)
assert af._data is not self.frame._data
af, bf = self.frame.align(self.frame, copy=False)
assert af._data is self.frame._data
# axis = 0
other = self.frame.iloc[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.iloc[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, self.frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
self.frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
assert isinstance(right, Series)
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.iloc[0:4, :10]
right = self.frame.iloc[2:, 6:]
empty = self.frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# Items
filtered = self.frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
assert len(filtered.columns) == 2
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
# pass in None
with tm.assert_raises_regex(TypeError, 'Must pass'):
self.frame.filter()
with tm.assert_raises_regex(TypeError, 'Must pass'):
self.frame.filter(items=None)
with tm.assert_raises_regex(TypeError, 'Must pass'):
self.frame.filter(axis=1)
# test mutually exclusive arguments
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$')
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
with tm.assert_raises_regex(TypeError, 'mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi')
# objects
filtered = self.mixed_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
assert 'C' in filtered
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
@pytest.mark.parametrize('name,expected', [
('a', DataFrame({u'a': [1, 2]})),
(u'a', DataFrame({u'a': [1, 2]})),
(u'あ', DataFrame({u'あ': [3, 4]}))
])
def test_filter_unicode(self, name, expected):
# GH13101
df = DataFrame({u'a': [1, 2], u'あ': [3, 4]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
@pytest.mark.parametrize('name', ['a', u'a'])
def test_filter_bytestring(self, name):
# GH13101
df = DataFrame({b'a': [1, 2], b'b': [3, 4]})
expected = DataFrame({b'a': [1, 2]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
# deprecated: gh-12410
f = lambda x: x.weekday() == 2
index = self.tsframe.index[[f(x) for x in self.tsframe.index]]
expected_weekdays = self.tsframe.reindex(index=index)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.tsframe.select(f, axis=0)
assert_frame_equal(result, expected_weekdays)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# replacement
f = lambda x: x.weekday == 2
result = self.tsframe.loc(axis=0)[f(self.tsframe.index)]
assert_frame_equal(result, expected_weekdays)
crit = lambda x: x in ['B', 'D']
result = self.frame.loc(axis=1)[(self.frame.columns.map(crit))]
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# doc example
df = DataFrame({'A': [1, 2, 3]}, index=['foo', 'bar', 'baz'])
crit = lambda x: x in ['bar', 'baz']
with tm.assert_produces_warning(FutureWarning):
expected = df.select(crit)
result = df.loc[df.index.map(crit)]
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# negative indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=True, axis=0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=False, axis=0)
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
pytest.raises(IndexError, df.take, [3, 1, 2, 30], axis=0)
pytest.raises(IndexError, df.take, [3, 1, 2, -31], axis=0)
pytest.raises(IndexError, df.take, [3, 1, 2, 5], axis=1)
pytest.raises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=lrange(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
newFrame = self.frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(index=lrange(4), columns=lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=lrange(4), columns=lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=lrange(2), columns=lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
| bsd-3-clause |
rgommers/scipy | scipy/interpolate/interpolate.py | 10 | 99598 | __all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
ravel, poly1d, asarray, intp)
import scipy.special as spec
from scipy.special import comb
from scipy._lib._util import prod
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def lagrange(x, w):
r"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e., f(`x`).
Returns
-------
lagrange : `numpy.poly1d` instance
The Lagrange interpolating polynomial.
Examples
--------
Interpolate :math:`f(x) = x^3` by 3 points.
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> y = x**3
>>> poly = lagrange(x, y)
Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly,
it is given by
.. math::
\begin{aligned}
L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
&= x (-2 + 3x)
\end{aligned}
>>> from numpy.polynomial.polynomial import Polynomial
>>> Polynomial(poly).coef
array([ 3., -2., 0.])
"""
M = len(x)
p = poly1d(0.0)
for j in range(M):
pt = poly1d(w[j])
for k in range(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
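# Illustrative note (not from the original source): for larger point sets,
# scipy.interpolate.BarycentricInterpolator evaluates the same polynomial in
# a numerically stabler way, without forming power-basis coefficients:
#   from scipy.interpolate import BarycentricInterpolator
#   p = BarycentricInterpolator(x, w)  # p(xnew) evaluates the polynomial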
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d:
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)`` which returns a scalar value `z`. This class returns a
function whose call method uses spline interpolation to find the value
of new points.
If `x` and `y` represent a regular grid, consider using
`RectBivariateSpline`.
If `z` is a vector value, consider using `interpn`.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multidimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multidimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated via nearest-neighbor extrapolation.
See Also
--------
RectBivariateSpline :
Much faster 2-D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : 1-D version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
interpolation_types = {'linear': 1, 'cubic': 3, 'quintic': 5}
try:
kx = ky = interpolation_types[kind]
except KeyError as e:
raise ValueError(
f"Unsupported interpolation type {repr(kind)}, must be "
f"either of {', '.join(map(repr, interpolation_types))}."
) from e
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1-D array
x-coordinates of the mesh on which to interpolate.
y : 1-D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2-D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x, kind="mergesort")
y = np.sort(y, kind="mergesort")
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
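# Illustrative usage sketch (not part of the source): the dx/dy arguments of
# __call__ evaluate partial derivatives of the fitted spline (requiring
# dx < kx and dy < ky), e.g.
#   f = interp2d(x, y, z, kind='cubic')
#   dz_dx = f(xnew, ynew, dx=1)  # d/dx of the interpolant on the new mesh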
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
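# Illustrative example (not from the source): with shape_to = (3, 5), an
# arr_from of shape (5,) or (1, 5) passes the check and is tiled up to 15
# values, while shape (2,) fails the zip comparison above and raises.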
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, str) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string or as an integer
specifying the order of the spline interpolator to use.
The string has to be one of 'linear', 'nearest', 'nearest-up', 'zero',
'slinear', 'quadratic', 'cubic', 'previous', or 'next'. 'zero',
'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of
zeroth, first, second or third order; 'previous' and 'next' simply
return the previous or next value of the point; 'nearest-up' and
'nearest' differ when interpolating half-integers (e.g. 0.5, 1.5)
in that 'nearest-up' rounds up and 'nearest' rounds down. Default
is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless ``fill_value="extrapolate"``.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Attributes
----------
fill_value
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Notes
-----
Calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Input values `x` and `y` must be convertible to `float` values like
`int` or `float`.
If the values in `x` are not unique, the resulting behavior is
undefined and specific to the choice of `kind`, i.e., changing
`kind` will change the behavior for duplicates.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
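An illustrative sketch (added for clarity; not in the original docstring):
out-of-range queries can be clamped with a two-element ``fill_value``
instead of raising:
>>> f2 = interpolate.interp1d(x, y, bounds_error=False, fill_value=(y[0], y[-1]))
>>> float(f2(-1.0)) == y[0]
True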
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1-D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest', 'nearest-up', 'previous',
'next'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x, kind="mergesort")
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: https://docs.python.org/reference/datamodel.html
if kind in ('linear', 'nearest', 'nearest-up', 'previous', 'next'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self._side = 'left'
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'nearest-up':
# Do division before addition to prevent possible integer
# overflow
self._side = 'right'
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'previous':
# Side for np.searchsorted and index for clipping
self._side = 'left'
self._ind = 0
# Move x by one floating point value to the left
self._x_shift = np.nextafter(self.x, -np.inf)
self._call = self.__class__._call_previousnext
elif kind == 'next':
self._side = 'right'
self._ind = 1
# Move x by one floating point value to the right
self._x_shift = np.nextafter(self.x, np.inf)
self._call = self.__class__._call_previousnext
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
mask = np.isnan(self.x)
if mask.any():
sx = self.x[~mask]
if sx.size == 0:
raise ValueError("`x` array is all-nan")
xx = np.linspace(np.nanmin(self.x),
np.nanmax(self.x),
len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
"""The fill value."""
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbor interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbor
x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
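# Worked example (illustrative, not from the source): for x = [0, 1, 2],
# x_bds becomes [0.5, 1.5]; querying 0.5 with side='left' yields index 0
# ('nearest' rounds half-integers down), while side='right' yields index 1
# ('nearest-up' rounds them up).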
def _call_previousnext(self, x_new):
"""Use previous/next neighbor of x_new, y_new = f(x_new)."""
# 1. Get index of left/right value
x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
# 2. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(1-self._ind,
len(self.x)-self._ind).astype(intp)
# 3. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices+self._ind-1]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return boolean masks indicating the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase:
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
``self.x`` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
``self.x`` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("Shapes of x {} and c {} are incompatible"
.format(x.shape, c.shape))
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("Shapes of c {} and self.c {} are incompatible"
.format(c.shape, self.c.shape))
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals.
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-D array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e., compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
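# Worked example (illustrative, not part of the source): for
# pp = PPoly([[3.], [2.], [1.]], [0., 1.]), i.e. 3*x**2 + 2*x + 1, the
# poch factors are [2., 1.], so derivative() has coefficients [[6.], [2.]],
# i.e. 6*x + 2, and pp.derivative()(0.5) == 5.0.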
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e., compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs over the remainder.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
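# Worked example (illustrative, not from the source): with breakpoints
# [0, 1] and extrapolate='periodic', integrate(0, 2.5) follows the divmod
# decomposition above: 2 * (integral over one period) plus the integral
# over [0, 0.5].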
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.solve()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in NumPy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in range(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if not isinstance(bp, BPoly):
raise TypeError(".from_bernstein_basis only accepts BPoly instances. "
"Got %s instead." % type(bp))
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-D array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature,
see for example [1]_ [2]_ [3]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829543, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
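An illustrative check (not in the original docstring): at ``x = 0.5`` the
three basis functions evaluate to ``0.25, 0.5, 0.25``, so
``B(0.5) = 0.25*1 + 0.5*2 + 0.25*3 = 2.0``:
>>> float(bp(0.5))
2.0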
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e., compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
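# Worked example (illustrative, not part of the source): for the docstring
# polynomial bp = BPoly([[1], [2], [3]], [0, 1]), B(x) = 1 + 2*x, and
# derivative() yields c2 = 2 * diff(c, axis=0) / dx = [[2.], [2.]], a
# degree-1 Bernstein polynomial identically equal to 2 = B'(x).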
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e., compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
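# An illustrative check (an addition, not in the original source): the
# antiderivative adds one order, and differentiating it recovers the
# original polynomial up to floating point error:
#
#   >>> bp = BPoly([[1], [2], [3]], [0, 1])
#   >>> ibp = bp.antiderivative()
#   >>> ibp.c.shape[0]              # one coefficient more than bp
#   4
#   >>> np.allclose(ibp.derivative()(0.3), bp(0.3))
#   True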
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic', it is converted to
# False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
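# A small sketch of the periodic branch (added for illustration, not part
# of the original source): B(x) = 1 + 2x on [0, 1] integrates to 2 over one
# period, so integrating over two periods doubles the result:
#
#   >>> bp = BPoly([[1], [2], [3]], [0, 1], extrapolate='periodic')
#   >>> float(bp.integrate(0, 1)), float(bp.integrate(0, 2))
#   (2.0, 4.0)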
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if not isinstance(pp, PPoly):
raise TypeError(".from_power_basis only accepts PPoly instances. "
"Got %s instead." % type(pp))
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
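# A round-trip sketch (an addition, not in the original source): converting
# from the power basis must leave the polynomial's values unchanged:
#
#   >>> pp = PPoly([[2.0], [1.0]], [0, 1])     # p(x) = 2*x + 1 on [0, 1]
#   >>> bp = BPoly.from_power_basis(pp)
#   >>> np.allclose(bp([0.0, 0.5, 1.0]), pp([0.0, 0.5, 1.0]))
#   True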
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1-D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
``x = 1`` and ``x = 2``.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x),  0 <= x < 1
       | 2 * (x - 1),  1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError as e:
raise ValueError(
"Using a 1-D array for y? Please .reshape(-1, 1)."
) from e
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (int, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on ``[xa, xb]`` and having the values and derivatives at the
endpoints `xa` and `xb` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of `ya` and `yb` are `na` and `nb`, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at `xa`. `ya[0]` is the value of the function, and
`ya[i]` for ``i > 0`` is the value of the ``i``th derivative.
yb : array_like
Derivatives at `xb`.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At ``x = xb`` it's the same with ``a = n - q``.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('Shapes of ya {} and yb {} are incompatible'
.format(ya.shape, yb.shape))
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
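# A doctest-style check of the construction (added for illustration, not
# part of the original source): values/derivatives ya = (1, 2) at xa = 0
# and yb = (3,) at xb = 1 yield the quadratic with coefficients [1, 2, 3],
# matching B(0) = c0, B'(0) = 2*(c1 - c0) and B(1) = c2:
#
#   >>> c = BPoly._construct_from_derivatives(
#   ...     0.0, 1.0, np.array([1.0, 2.0]), np.array([3.0]))
#   >>> c
#   array([1., 2., 3.])
#   >>> bp = BPoly(c[:, None], [0, 1])
#   >>> float(bp(0)), float(bp.derivative()(0)), float(bp(1))
#   (1.0, 2.0, 3.0)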
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1-D
d : integer
Returns
-------
array
coefficient array, 1-D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
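# An illustrative invariance check for `_raise_degree` (an addition, not in
# the original source): re-expressing the coefficients in a higher-degree
# Bernstein basis must not change the polynomial's values:
#
#   >>> c = np.array([1.0, 2.0, 3.0])
#   >>> c4 = BPoly._raise_degree(c, 2)          # degree 2 -> degree 4
#   >>> xs = np.linspace(0, 1, 7)
#   >>> np.allclose(BPoly(c[:, None], [0, 1])(xs),
#   ...             BPoly(c4[:, None], [0, 1])(xs))
#   True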
class NdPPoly:
"""
Piecewise tensor product polynomial
The value at point ``xp = (x', y', z', ...)`` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
derivative
antiderivative
integrate
integrate_1d
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1-D derivative along a selected dimension in-place
May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# differentiating more times than the local degree leaves zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
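# For reference (an added note, not part of the original source):
# differentiating the power-basis term x**m a total of nu times multiplies
# its coefficient by m*(m-1)*...*(m-nu+1), i.e. the rising factorial
# poch(m - nu + 1, nu) applied above. For m = 3, nu = 2:
#
#   >>> from scipy import special as spec
#   >>> spec.poch(2, 2)        # 2*3 == 3*2, the d^2/dx^2 factor for x**3
#   6.0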
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1-D antiderivative along a selected dimension
May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1-D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1D, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1-D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
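# A minimal usage sketch (an addition, not part of the original source): a
# single-interval 2-D tensor-product polynomial p(x, y) = x*y, using the
# coefficient layout described in the class docstring (highest powers
# first); shapes and printed values are illustrative:
#
#   >>> c = np.zeros((2, 2, 1, 1))
#   >>> c[0, 0, 0, 0] = 1.0                     # the x**1 * y**1 term
#   >>> p = NdPPoly(c, ([0.0, 1.0], [0.0, 1.0]))
#   >>> float(p([[0.5, 0.5]]))                  # 0.5 * 0.5
#   0.25
#   >>> float(p.integrate(((0, 1), (0, 1))))    # double integral of x*y
#   0.25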
class RegularGridInterpolator:
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbor interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
If any of `points` have a dimension of size 1, linear interpolation will
return an array of `nan` values. Nearest-neighbor interpolation will work
as usual in this case.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3-D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> xg, yg, zg = np.meshgrid(x, y, z, indexing='ij', sparse=True)
>>> data = f(xg, yg, zg)
``data`` is now a 3-D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
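# A one-dimensional sketch (an addition, not part of the original source) of
# the arithmetic in `_find_indices` and `_evaluate_linear`: locate the
# interval, compute the normalized distance to its lower edge, then blend
# the two edge values:
#
#   >>> grid = np.array([0.0, 1.0, 4.0])        # uneven spacing is allowed
#   >>> vals = np.array([0.0, 10.0, 40.0])
#   >>> x = 2.5
#   >>> i = min(max(np.searchsorted(grid, x) - 1, 0), grid.size - 2)
#   >>> t = (x - grid[i]) / (grid[i + 1] - grid[i])
#   >>> (1 - t) * vals[i] + t * vals[i + 1]     # matches the interpolator
#   25.0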
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a regular 3-D grid:
>>> from scipy.interpolate import interpn
>>> def value_func_3d(x, y, z):
... return 2 * x + 3 * y - z
>>> x = np.linspace(0, 4, 5)
>>> y = np.linspace(0, 5, 6)
>>> z = np.linspace(0, 6, 7)
>>> points = (x, y, z)
>>> values = value_func_3d(*np.meshgrid(*points, indexing='ij'))
Evaluate the interpolating function at a point
>>> point = np.array([2.21, 3.12, 1.15])
>>> print(interpn(points, values, point))
[12.63]
See also
--------
NearestNDInterpolator : Nearest neighbor interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method splinef2d can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method splinef2d does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method splinef2d can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
if bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class _ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("_ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in range(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
| bsd-3-clause |
pbrod/numpy | numpy/core/code_generators/ufunc_docstrings.py | 7 | 106598 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
import textwrap
docdict = {}
def get(name):
return docdict.get(name)
# common parameter text to all ufuncs
subst = {
'PARAMS': textwrap.dedent("""
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
"broadcastable to a common\n shape (which becomes "
"the shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
skip = (
# gufuncs do not use the OUT_SCALAR replacement strings
'matmul',
# clip has 3 inputs, which is not handled by this
'clip',
)
if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
for k, v in subst.items():
doc = doc.replace('$' + k, v)
docdict['.'.join((place, name))] = doc
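# A minimal sketch (an addition, not part of the original file) of how the
# '$'-substitution above behaves: every '$KEY' token is replaced by the
# shared text from `subst` before the docstring is stored in `docdict`:
#
#   >>> doc = "x : array_like\n$PARAMS\nReturns"
#   >>> for k, v in subst.items():
#   ...     doc = doc.replace('$' + k, v)
#   >>> '$PARAMS' in doc
#   False
#   >>> doc.splitlines()[1].lstrip().startswith('out :')
#   True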
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
``np.abs`` is a shorthand for this function.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
$OUT_SCALAR_1
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
The `abs` function can be used as a shorthand for ``np.absolute`` on
ndarrays.
>>> x = np.array([-1.2, 1.2])
>>> abs(x)
array([1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added.
$BROADCASTABLE_2
$PARAMS
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
The ``+`` operator can be used as a shorthand for ``np.add`` on ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 + x2
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
$PARAMS
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi].
$OUT_SCALAR_1
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``cos(z) = x``. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts ``[-inf, -1]`` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in ``[-pi, pi]`` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
$PARAMS
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
$OUT_SCALAR_1
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
$PARAMS
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
$OUT_SCALAR_1
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [``1j, infj``] and [``-1j, -infj``] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates.
$BROADCASTABLE_2
$PARAMS
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
$OUT_SCALAR_2
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Array of the same shape as `x`.
$OUT_SCALAR_1
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that ``tanh(z) = x``. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
https://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True])
The ``&`` operator can be used as a shorthand for ``np.bitwise_and`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([3, 14, 16])
>>> x1 & x2
array([ 2, 4, 16])
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
The ``|`` operator can be used as a shorthand for ``np.bitwise_or`` on
ndarrays.
>>> x1 = np.array([2, 5, 255])
>>> x2 = np.array([4, 4, 4])
>>> x1 | x2
array([ 6, 5, 255])
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_2
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False])
The ``^`` operator can be used as a shorthand for ``np.bitwise_xor`` on
ndarrays.
>>> x1 = np.array([True, True])
>>> x2 = np.array([False, True])
>>> x1 ^ x2
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
``i >= x``. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
$OUT_SCALAR_1
See Also
--------
floor, trunc, rint, fix
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, floor, rint, fix
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
$OUT_SCALAR_1
Notes
-----
`conj` is an alias for `conjugate`:
>>> np.conj is np.conjugate
True
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding cosine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array of same shape as `x`.
$OUT_SCALAR_1
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
$PARAMS
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
$OUT_SCALAR_1
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros(rad.shape)
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in degrees.
$OUT_SCALAR_1
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
                      0   if x1 < 0
heaviside(x1, x2) =  x2   if x1 == 0
                      1   if x1 > 0
where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x1 : array_like
Input values.
x2 : array_like
The value of the function when x1 is 0.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The output array, element-wise Heaviside step function of `x1`.
$OUT_SCALAR_2
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise.
$OUT_SCALAR_2
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
The ``/`` operator can be used as a shorthand for ``np.divide`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = 2 * np.ones(3)
>>> x1 / x2
array([[0. , 0.5, 1. ],
[1.5, 2. , 2.5],
[3. , 3.5, 4. ]])
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False])
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True])
The ``==`` operator can be used as a shorthand for ``np.equal`` on
ndarrays.
>>> a = np.array([2, 4, 6])
>>> b = np.array([2, 4, 2])
>>> a == b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
$OUT_SCALAR_1
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
https://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise 2 to the power `x`.
$OUT_SCALAR_1
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise exponential minus one: ``out = exp(x) - 1``.
$OUT_SCALAR_1
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
$PARAMS
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
$OUT_SCALAR_1
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
$OUT_SCALAR_1
See Also
--------
ceil, trunc, rint, fix
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", where
``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`. The "floor-towards-zero"
function is called ``fix`` in NumPy.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
Python ``%`` (`remainder`) function, so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
$OUT_SCALAR_2
See Also
--------
remainder : Remainder complementary to floor_divide.
divmod : Simultaneous floor division and remainder.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
The ``//`` operator can be used as a shorthand for ``np.floor_divide``
on ndarrays.
>>> x1 = np.array([1., 2., 3., 4.])
>>> x1 // 2.5
array([0., 0., 1., 1.])
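The pairing with ``%`` noted above can be checked directly; a small
illustrative check:
>>> a, b = np.array([7., -7.]), 3.
>>> a % b + b * (a // b) == a
array([ True, True])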
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod; the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
$OUT_SCALAR_2
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is determined by convention. For `fmod`, the sign of the result is the sign
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False])
The ``>`` operator can be used as a shorthand for ``np.greater`` on
ndarrays.
>>> a = np.array([4, 2])
>>> b = np.array([2, 2])
>>> a > b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : bool or ndarray of bool
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False])
The ``>=`` operator can be used as a shorthand for ``np.greater_equal``
on ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a >= b
array([ True, True, False])
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
$BROADCASTABLE_2
$PARAMS
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
$OUT_SCALAR_2
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
$PARAMS
Returns
-------
out : ndarray or scalar
Result.
$OUT_SCALAR_1
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
The number 13 has the 8-bit binary representation ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
The result depends on the bit-width:
>>> x = np.invert(np.array(13, dtype=np.uint16))
>>> x
65522
>>> np.binary_repr(x, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True])
The ``~`` operator can be used as a shorthand for ``np.invert`` on
ndarrays.
>>> x1 = np.array([True, False])
>>> ~x1
array([False, True])
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray, bool
True where ``x`` is not positive infinity, negative infinity,
or NaN; false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
that positive infinity is not equivalent to negative infinity, and that
`inf` is equivalent to positive infinity. Errors result if the second
argument is also supplied when `x` is a scalar input, or if the first
and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
$PARAMS
Returns
-------
y : bool (scalar) or boolean ndarray
True where ``x`` is positive or negative infinity, false otherwise.
$OUT_SCALAR_1
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaN, false otherwise.
$OUT_SCALAR_1
See Also
--------
isinf, isneginf, isposinf, isfinite, isnat
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False])
""")
add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like
Input array with datetime or timedelta data type.
$PARAMS
Returns
-------
y : ndarray or bool
True where ``x`` is NaT, false otherwise.
$OUT_SCALAR_1
See Also
--------
isnan, isinf, isneginf, isposinf, isfinite
Examples
--------
>>> np.isnat(np.datetime64("NaT"))
True
>>> np.isnat(np.datetime64("2016-01-01"))
False
>>> np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]"))
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
$OUT_SCALAR_2
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
Note that the dtype of the second argument may change the dtype of the
result and can lead to unexpected results in some cases (see
:ref:`Casting Rules <ufuncs.casting>`):
>>> a = np.left_shift(np.uint8(255), 1) # Expect 254
>>> print(a, type(a)) # Unexpected result due to upcasting
510 <class 'numpy.int64'>
>>> b = np.left_shift(np.uint8(255), np.uint8(1))
>>> print(b, type(b))
254 <class 'numpy.uint8'>
The ``<<`` operator can be used as a shorthand for ``np.left_shift`` on
ndarrays.
>>> x1 = 5
>>> x2 = np.array([1, 2, 3])
>>> x1 << x2
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False])
The ``<`` operator can be used as a shorthand for ``np.less`` on ndarrays.
>>> a = np.array([1, 2])
>>> b = np.array([2, 2])
>>> a < b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True])
The ``<=`` operator can be used as a shorthand for ``np.less_equal`` on
ndarrays.
>>> a = np.array([4, 2, 1])
>>> b = np.array([2, 2, 2])
>>> a <= b
array([False, True, True])
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is the logarithm in
base `e`.
Parameters
----------
x : array_like
Input value.
$PARAMS
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
$OUT_SCALAR_1
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
$OUT_SCALAR_1
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., nan])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
$OUT_SCALAR_1
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
that they underflow the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
$OUT_SCALAR_2
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
that they underflow the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
$BROADCASTABLE_2
$PARAMS
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
$OUT_SCALAR_2
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
$OUT_SCALAR_1
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False])
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False])
The ``&`` operator can be used as a shorthand for ``np.logical_and`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a & b
array([False, False])
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
$OUT_SCALAR_1
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False])
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True])
The ``|`` operator can be used as a shorthand for ``np.logical_or`` on
boolean ndarrays.
>>> a = np.array([True, False])
>>> b = np.array([False, False])
>>> a | b
array([ True, False])
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True])
A simple example showing support of broadcasting:
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]])
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
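A small illustrative check of this equivalence:
>>> x1, x2 = np.array([2, 3, 4]), np.array([1, 5, 2])
>>> np.array_equal(np.maximum(x1, x2), np.where(x1 >= x2, x1, x2))
True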
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
``fmax`` is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
``fmin`` is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., nan])
""")
add_newdoc('numpy.core.umath', 'clip',
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : array_like
Minimum value.
a_max : array_like
Maximum value.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
$PARAMS
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.clip :
Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
dispatching to one of `~numpy.core.umath.clip`,
`~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
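Examples
--------
A minimal illustrative example, using the public ``np.clip`` wrapper:
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])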
""")
add_newdoc('numpy.core.umath', 'matmul',
"""
Matrix product of two arrays.
Parameters
----------
x1, x2 : array_like
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
provided or None, a freshly-allocated array is returned.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.16
Now handles ufunc kwargs
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
ValueError
If the last dimension of `x1` is not the same size as
the second-to-last dimension of `x2`.
If a scalar value is passed in.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
``matmul`` differs from ``dot`` in two important ways:
- Multiplication by scalars is not allowed, use ``*`` instead.
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the ``@`` operator introduced
in Python 3.5 following :pep:`465`.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays:
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
98
>>> sum(a[0, 1, :] * b[0, :, 1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: matmul: Input operand 1 does not have enough dimensions ...
The ``@`` operator can be used as a shorthand for ``np.matmul`` on
ndarrays.
>>> x1 = np.array([2j, 3j])
>>> x2 = np.array([2j, 3j])
>>> x1 @ x2
(-13+0j)
.. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y1 : ndarray
Fractional part of `x`.
$OUT_SCALAR_1
y2 : ndarray
Integral part of `x`.
$OUT_SCALAR_1
Notes
-----
For integer input the return values are floats.
See Also
--------
divmod : ``divmod(x, 1)`` is equivalent to ``modf`` with the return values
switched, except it always has a positive remainder.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
The ``*`` operator can be used as a shorthand for ``np.multiply`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 * x2
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
$PARAMS
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
$OUT_SCALAR_1
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
The unary ``-`` operator can be used as a shorthand for ``np.negative`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> -x1
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'positive',
"""
Numerical positive, element-wise.
.. versionadded:: 1.13.0
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = +x`.
$OUT_SCALAR_1
Notes
-----
Equivalent to `x.copy()`, but only defined for types that support
arithmetic.
Examples
--------
>>> x1 = np.array(([1., -1.]))
>>> np.positive(x1)
array([ 1., -1.])
The unary ``+`` operator can be used as a shorthand for ``np.positive`` on
ndarrays.
>>> x1 = np.array(([1., -1.]))
>>> +x1
array([ 1., -1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
Output array, element-wise comparison of `x1` and `x2`.
Typically of type bool, unless ``dtype=object`` is passed.
$OUT_SCALAR_2
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True])
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]])
The ``!=`` operator can be used as a shorthand for ``np.not_equal`` on
ndarrays.
>>> a = np.array([1., 2.])
>>> b = np.array([1., 3.])
>>> a != b
array([False, True])
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be ``numpy.ones_like``, but a dedicated function
has since been written for consistency with the other ``*_like``
functions. It is now only used internally in a limited fashion.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in an array.
>>> x1 = np.arange(6)
>>> x1
array([0, 1, 2, 3, 4, 5])
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
The ``**`` operator can be used as a shorthand for ``np.power`` on
ndarrays.
>>> x2 = np.array([1, 2, 3, 3, 2, 1])
>>> x1 = np.arange(6)
>>> x1 ** x2
array([ 0, 1, 8, 27, 16, 5])
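As noted above, an integer base raised to a negative integer power
raises an error; an illustrative case:
>>> np.power([2], -1)
Traceback (most recent call last):
...
ValueError: Integers to negative integer powers are not allowed.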
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
$OUT_SCALAR_2
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in an array.
>>> x1 = np.arange(6)
>>> x1
array([0, 1, 2, 3, 4, 5])
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
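Unlike ``power``, a negative exponent on integer input gives a usable
result because the inputs are promoted to float; illustrative:
>>> np.float_power(2, -2)
0.25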
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding radian values.
$OUT_SCALAR_1
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros(deg.shape)
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
$PARAMS
Returns
-------
y : ndarray
The corresponding angle in radians.
$OUT_SCALAR_1
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
Return array.
$OUT_SCALAR_1
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
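The integer caveat from the note above, illustrated:
>>> np.reciprocal(np.array([1, 2, 4]))
array([1, 0, 0])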
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
is ``mod``.
.. warning::
This should not be confused with:
* Python 3.7's `math.remainder` and C's ``remainder``, which
compute the IEEE remainder, the complement to ``round(x1 / x2)``.
* The MATLAB ``rem`` function and/or the C ``%`` operator, which is
the complement to ``int(x1 / x2)``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
``mod`` is an alias of ``remainder``.
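The aliasing can be verified directly:
>>> np.mod is np.remainder
True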
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
The ``%`` operator can be used as a shorthand for ``np.remainder`` on
ndarrays.
>>> x1 = np.arange(7)
>>> x1 % 5
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'divmod',
"""
Return element-wise quotient and remainder simultaneously.
.. versionadded:: 1.13.0
``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster
because it avoids redundant work. It is used to implement the Python
built-in function ``divmod`` on NumPy arrays.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out1 : ndarray
Element-wise quotient resulting from floor division.
$OUT_SCALAR_2
out2 : ndarray
Element-wise remainder from floor division.
$OUT_SCALAR_2
See Also
--------
floor_divide : Equivalent to Python's ``//`` operator.
remainder : Equivalent to Python's ``%`` operator.
modf : Equivalent to ``divmod(x, 1)`` for positive ``x`` with the return
values switched.
Examples
--------
>>> np.divmod(np.arange(5), 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
The `divmod` function can be used as a shorthand for ``np.divmod`` on
ndarrays.
>>> x = np.arange(5)
>>> divmod(x, 3)
(array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1]))
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` positions. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
$OUT_SCALAR_2
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
The ``>>`` operator can be used as a shorthand for ``np.right_shift`` on
ndarrays.
>>> x1 = 10
>>> x2 = np.array([1,2,3])
>>> x1 >> x2
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
$OUT_SCALAR_1
See Also
--------
fix, ceil, floor, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
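Halfway cases round to the nearest even value, as described above;
an illustrative case:
>>> np.rint(np.array([0.5, 1.5, 2.5]))
array([ 0., 2., 2.])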
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
``nan`` is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
$PARAMS
Returns
-------
y : ndarray
The sign of `x`.
$OUT_SCALAR_1
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
$PARAMS
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
$OUT_SCALAR_1
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False])
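The sign bit is also set for negative zero, even though negative zero
is not less than zero; an illustrative distinction:
>>> np.signbit(np.array([-0.0, 0.0]))
array([ True, False])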
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
The next representable values of `x1` in the direction of `x2`.
$OUT_SCALAR_2
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True])
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x : array_like
Values to find the spacing of.
$PARAMS
Returns
-------
out : ndarray or scalar
The spacing of values of `x`.
$OUT_SCALAR_1
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
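An illustrative float32 analogue of the EPS relationship above:
>>> np.spacing(np.float32(1)) == np.finfo(np.float32).eps
True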
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
$PARAMS
Returns
-------
y : array_like
The sine of each element of x.
$OUT_SCALAR_1
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-16j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., nan, inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
$PARAMS
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the
cube-root of each element in `x`.
If `out` was provided, `y` is a reference to it.
$OUT_SCALAR_1
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
$PARAMS
Returns
-------
out : ndarray or scalar
Element-wise `x*x`, of the same shape and dtype as `x`.
$OUT_SCALAR_1
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
The ``-`` operator can be used as a shorthand for ``np.subtract`` on
ndarrays.
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> x1 - x2
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
$PARAMS
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
$OUT_SCALAR_1
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Unlike Python's traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
$BROADCASTABLE_2
$PARAMS
Returns
-------
out : ndarray or scalar
$OUT_SCALAR_2
Notes
-----
In Python, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
The ``/`` operator can be used as a shorthand for ``np.true_divide`` on
ndarrays.
>>> x = np.arange(5)
>>> x / 4
array([0. , 0.25, 0.5 , 0.75, 1. ])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
$PARAMS
Returns
-------
mantissa : ndarray
Floating values between -1 and 1.
$OUT_SCALAR_1
exponent : ndarray
Integer exponents of 2.
$OUT_SCALAR_1
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported; they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
$BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
$OUT_SCALAR_2
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported; they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`; if used by itself, it is
clearer to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
add_newdoc('numpy.core.umath', 'gcd',
"""
Returns the greatest common divisor of ``|x1|`` and ``|x2|``.
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The greatest common divisor of the absolute value of the inputs.
$OUT_SCALAR_2
See Also
--------
lcm : The lowest common multiple
Examples
--------
>>> np.gcd(12, 20)
4
>>> np.gcd.reduce([15, 25, 35])
5
>>> np.gcd(np.arange(6), 20)
array([20, 1, 2, 1, 4, 5])
""")
add_newdoc('numpy.core.umath', 'lcm',
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``.
Parameters
----------
x1, x2 : array_like, int
Arrays of values.
$BROADCASTABLE_2
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs.
$OUT_SCALAR_2
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm.reduce([3, 12, 20])
60
>>> np.lcm.reduce([40, 12, 20])
120
>>> np.lcm(np.arange(6), 20)
array([ 0, 20, 20, 60, 20, 20])
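The product of the gcd and lcm recovers the product of the inputs,
since ``gcd(a, b) * lcm(a, b) == |a * b|``:
>>> np.gcd(12, 20) * np.lcm(12, 20)
240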
""")
| bsd-3-clause |
keir-rex/zipline | zipline/history/history_container.py | 18 | 33931 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bisect import insort_left
from collections import namedtuple
from itertools import groupby, product
import logbook
import numpy as np
import pandas as pd
from six import itervalues, iteritems, iterkeys
from . history import HistorySpec
from zipline.finance.trading import with_environment
from zipline.utils.data import RollingPanel, _ensure_index
from zipline.utils.munge import ffill, bfill
logger = logbook.Logger('History Container')
# The closing price is referred to by multiple names,
# allow both for price rollover logic etc.
CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'})
def ffill_buffer_from_prior_values(freq,
field,
buffer_frame,
digest_frame,
pv_frame,
raw=False):
"""
Forward-fill a buffer frame, falling back to the end-of-period values of a
digest frame if the buffer frame has leading NaNs.
"""
# convert to ndarray if necessary
digest_values = digest_frame
if raw and isinstance(digest_frame, pd.DataFrame):
digest_values = digest_frame.values
buffer_values = buffer_frame
if raw and isinstance(buffer_frame, pd.DataFrame):
buffer_values = buffer_frame.values
nan_sids = pd.isnull(buffer_values[0])
if np.any(nan_sids) and len(digest_values):
# If we have any leading nans in the buffer and we have a non-empty
# digest frame, use the oldest digest values as the initial buffer
# values.
buffer_values[0, nan_sids] = digest_values[-1, nan_sids]
nan_sids = pd.isnull(buffer_values[0])
if np.any(nan_sids):
# If we still have leading nans, fall back to the last known values
# from before the digest.
key_loc = pv_frame.index.get_loc((freq.freq_str, field))
filler = pv_frame.values[key_loc, nan_sids]
buffer_values[0, nan_sids] = filler
if raw:
filled = ffill(buffer_values)
return filled
return buffer_frame.ffill()
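# A minimal sketch (not part of zipline) of the fallback chain implemented
# above: leading NaNs in the buffer are seeded from the last digest row
# first, then from the last-known prior values. `_FakeFreq` and the toy
# frames are hypothetical stand-ins; only `ffill_buffer_from_prior_values`
# is the real function.
def _demo_ffill_buffer_from_prior_values():
    _FakeFreq = namedtuple('FakeFreq', 'freq_str')
    sids = [1, 2]
    # First buffer row is all NaN; second row has a value for sid 1 only.
    buffer_frame = pd.DataFrame([[np.nan, np.nan], [10.0, np.nan]],
                                columns=sids)
    # An empty digest forces the fallback to the prior-values frame.
    digest_frame = pd.DataFrame(np.empty((0, len(sids))), columns=sids)
    pv_frame = pd.DataFrame(
        [[1.0, 2.0]],
        index=pd.MultiIndex.from_tuples([('1d', 'price')]),
        columns=sids,
    )
    filled = ffill_buffer_from_prior_values(
        _FakeFreq('1d'), 'price', buffer_frame, digest_frame, pv_frame,
        raw=True,
    )
    # filled[0] == [1., 2.] (seeded from pv_frame) and
    # filled[1] == [10., 2.] (sid 2 forward-filled from the seeded row).
    return filled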
def ffill_digest_frame_from_prior_values(freq,
field,
digest_frame,
pv_frame,
raw=False):
"""
Forward-fill a digest frame, falling back to the last known prior values if
necessary.
"""
# convert to ndarray if necessary
values = digest_frame
if raw and isinstance(digest_frame, pd.DataFrame):
values = digest_frame.values
nan_sids = pd.isnull(values[0])
if np.any(nan_sids):
# If we have any leading nans in the frame, use values from pv_frame to
# seed values for those sids.
key_loc = pv_frame.index.get_loc((freq.freq_str, field))
filler = pv_frame.values[key_loc, nan_sids]
values[0, nan_sids] = filler
if raw:
filled = ffill(values)
return filled
return digest_frame.ffill()
def freq_str_and_bar_count(history_spec):
"""
Helper for getting the frequency string and bar count from a history spec.
"""
return (history_spec.frequency.freq_str, history_spec.bar_count)
@with_environment()
def next_bar(spec, env):
"""
Returns a function that will return the next bar for a given datetime.
"""
if spec.frequency.unit_str == 'd':
if spec.frequency.data_frequency == 'minute':
return lambda dt: env.get_open_and_close(
env.next_trading_day(dt),
)[1]
else:
return env.next_trading_day
else:
return env.next_market_minute
def compute_largest_specs(history_specs):
"""
Maps a Frequency to the largest HistorySpec at that frequency from an
iterable of HistorySpecs.
"""
return {key: max(group, key=lambda f: f.bar_count)
for key, group in groupby(
sorted(history_specs, key=freq_str_and_bar_count),
key=lambda spec: spec.frequency)}
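# A minimal sketch (hypothetical namedtuple stand-ins, not real
# HistorySpec/Frequency objects) showing that only the spec with the
# largest bar_count survives for each frequency:
def _demo_compute_largest_specs():
    _FakeFreq = namedtuple('FakeFreq', 'freq_str')
    _FakeSpec = namedtuple('FakeSpec', ['frequency', 'bar_count'])
    daily = _FakeFreq('1d')
    specs = [_FakeSpec(daily, 5), _FakeSpec(daily, 20)]
    # Maps _FakeFreq('1d') to the 20-bar spec.
    return compute_largest_specs(specs)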
# tuples to store a change to the shape of a HistoryContainer
FrequencyDelta = namedtuple(
'FrequencyDelta',
['freq', 'buffer_delta'],
)
LengthDelta = namedtuple(
'LengthDelta',
['freq', 'delta'],
)
HistoryContainerDeltaSuper = namedtuple(
'HistoryContainerDelta',
['field', 'frequency_delta', 'length_delta'],
)
class HistoryContainerDelta(HistoryContainerDeltaSuper):
"""
A class representing a resize of the history container.
"""
def __new__(cls, field=None, frequency_delta=None, length_delta=None):
"""
field is a new field that was added.
frequency is a FrequencyDelta representing a new frequency was added.
length is a bar LengthDelta which is a frequency and a bar_count.
If any field is None, then no change occurred of that type.
"""
return super(HistoryContainerDelta, cls).__new__(
cls, field, frequency_delta, length_delta,
)
@property
def empty(self):
"""
Checks if the delta is empty.
"""
return (self.field is None and
self.frequency_delta is None and
self.length_delta is None)
def normalize_to_data_freq(data_frequency, dt):
if data_frequency == 'minute':
return dt
return pd.tslib.normalize_date(dt)
class HistoryContainer(object):
"""
Container for all history panels and frames used by an algoscript.
To be used internally by TradingAlgorithm, but *not* passed directly to the
algorithm.
Entry point for the algoscript is the result of `get_history`.
"""
VALID_FIELDS = {
'price', 'open_price', 'volume', 'high', 'low', 'close_price',
}
def __init__(self,
history_specs,
initial_sids,
initial_dt,
data_frequency,
bar_data=None):
"""
A container to hold a rolling window of historical data within a user's
algorithm.
Args:
history_specs (dict[Frequency:HistorySpec]): The starting history
specs that this container should be able to service.
initial_sids (set[Asset or Int]): The starting sids to watch.
initial_dt (datetime): The datetime to start collecting history from.
bar_data (BarData): If this container is being constructed during
handle_data, this is the BarData for the current bar to fill the
buffer with. If this is constructed elsewhere, it is None.
Returns:
An instance of a new HistoryContainer
"""
# History specs to be served by this container.
self.history_specs = history_specs
self.largest_specs = compute_largest_specs(
itervalues(self.history_specs)
)
# The set of fields specified by all history specs
self.fields = pd.Index(
sorted(set(spec.field for spec in itervalues(history_specs)))
)
self.sids = pd.Index(
sorted(set(initial_sids or []))
)
self.data_frequency = data_frequency
initial_dt = normalize_to_data_freq(self.data_frequency, initial_dt)
# This panel contains raw minutes for periods that haven't been fully
# completed. When a frequency period rolls over, these minutes are
# digested using some sort of aggregation call on the panel (e.g. `sum`
# for volume, `max` for high, `min` for low, etc.).
self.buffer_panel = self.create_buffer_panel(initial_dt, bar_data)
# Dictionaries with Frequency objects as keys.
self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
self.create_digest_panels(initial_sids, initial_dt)
# Helps prop up the prior day panel against having a nan, when the data
# has been seen.
self.last_known_prior_values = pd.DataFrame(
data=None,
index=self.prior_values_index,
columns=self.prior_values_columns,
# Note: For bizarre "intricacies of the spaghetti that is pandas
# indexing logic" reasons, setting this dtype prevents indexing
# errors in update_last_known_values. This is safe for the time
# being because our only forward-fillable fields are floats. If we
# need to add a non-float-typed forward-fillable field, then we may
# find ourselves having to track down and fix a pandas bug.
dtype=np.float64,
)
_ffillable_fields = None
@property
def ffillable_fields(self):
if self._ffillable_fields is None:
fillables = self.fields.intersection(HistorySpec.FORWARD_FILLABLE)
self._ffillable_fields = fillables
return self._ffillable_fields
@property
def prior_values_index(self):
index_values = list(
product(
(freq.freq_str for freq in self.unique_frequencies),
# Only store prior values for forward-fillable fields.
self.ffillable_fields,
)
)
if index_values:
return pd.MultiIndex.from_tuples(index_values)
else:
# MultiIndex doesn't gracefully support empty input, so we return
# an empty regular Index if we have no values.
return pd.Index(index_values)
@property
def prior_values_columns(self):
return self.sids
@property
def all_panels(self):
yield self.buffer_panel
for panel in self.digest_panels.values():
yield panel
@property
def unique_frequencies(self):
"""
Return an iterator over all the unique frequencies serviced by this
container.
"""
return iterkeys(self.largest_specs)
@with_environment()
def _add_frequency(self, spec, dt, data, env=None):
"""
Adds a new frequency to the container. This reshapes the buffer_panel
if needed.
"""
freq = spec.frequency
self.largest_specs[freq] = spec
new_buffer_len = 0
if freq.max_bars > self.buffer_panel.window_length:
# More bars need to be held in the buffer_panel to support this
# freq
if freq.data_frequency \
!= self.buffer_spec.frequency.data_frequency:
# If the data_frequencies are not the same, then we need to
# create a fresh buffer.
self.buffer_panel = self.create_buffer_panel(
dt, bar_data=data,
)
new_buffer_len = None
else:
# The frequencies are the same, we just need to add more bars.
self._resize_panel(
self.buffer_panel,
freq.max_bars,
dt,
self.buffer_spec.frequency,
)
new_buffer_len = freq.max_minutes
# update the current buffer_spec to reflect the new length.
self.buffer_spec.bar_count = new_buffer_len + 1
if spec.bar_count > 1:
# This spec has more than one bar, construct a digest panel for it.
self.digest_panels[freq] = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self.cur_window_starts[freq] = dt
self.cur_window_closes[freq] = freq.window_close(
self.cur_window_starts[freq]
)
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return FrequencyDelta(freq, new_buffer_len)
def _add_field(self, field):
"""
Adds a new field to the container.
"""
# self.fields is already sorted, so we just need to insert the new
# field in the correct index.
ls = list(self.fields)
insort_left(ls, field)
self.fields = pd.Index(ls)
# unset fillable fields cache
self._ffillable_fields = None
self._realign_fields()
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
return field
@with_environment()
def _add_length(self, spec, dt, env=None):
"""
Increases the length of the digest panel for spec.frequency. If this
frequency does not yet have a panel and one is needed, a digest panel
will be constructed.
"""
old_count = self.largest_specs[spec.frequency].bar_count
self.largest_specs[spec.frequency] = spec
delta = spec.bar_count - old_count
panel = self.digest_panels.get(spec.frequency)
if panel is None:
# The old length for this frequency was 1 bar, meaning no digest
# panel was held. We must construct a new one here.
panel = self._create_digest_panel(
dt, spec=spec, env=env,
)
else:
self._resize_panel(
panel, spec.bar_count - 1, dt, freq=spec.frequency, env=env,
)
self.digest_panels[spec.frequency] = panel
return LengthDelta(spec.frequency, delta)
@with_environment()
def _resize_panel(self, panel, size, dt, freq, env=None):
"""
Resizes a panel, fills the date_buf with the correct values.
"""
# This is the oldest datetime that will be shown in the current window
# of the panel.
oldest_dt = pd.Timestamp(panel.start_date, tz='utc',)
delta = size - panel.window_length
# Construct the missing dates.
missing_dts = self._create_window_date_buf(
delta, freq.unit_str, freq.data_frequency, oldest_dt,
)
panel.extend_back(missing_dts)
@with_environment()
def _create_window_date_buf(self,
window,
unit_str,
data_frequency,
dt,
env=None):
"""
Creates a window length date_buf looking backwards from dt.
"""
if unit_str == 'd':
# Get the properly key'd datetime64 out of the pandas Timestamp
if data_frequency != 'daily':
arr = env.open_close_window(
dt,
window,
offset=-window,
).market_close.astype('datetime64[ns]').values
else:
arr = env.open_close_window(
dt,
window,
offset=-window,
).index.values
return arr
else:
return env.market_minute_window(
env.previous_market_minute(dt),
window,
step=-1,
)[::-1].values
@with_environment()
def _create_panel(self, dt, spec, env=None):
"""
Constructs a rolling panel with a properly aligned date_buf.
"""
dt = normalize_to_data_freq(spec.frequency.data_frequency, dt)
window = spec.bar_count - 1
date_buf = self._create_window_date_buf(
window,
spec.frequency.unit_str,
spec.frequency.data_frequency,
dt,
env=env,
)
panel = RollingPanel(
window=window,
items=self.fields,
sids=self.sids,
initial_dates=date_buf,
)
return panel
@with_environment()
def _create_digest_panel(self,
dt,
spec,
window_starts=None,
window_closes=None,
env=None):
"""
Creates a digest panel, setting the window_starts and window_closes.
If window_starts or window_closes are None, then self.cur_window_starts
or self.cur_window_closes will be used.
"""
freq = spec.frequency
window_starts = window_starts if window_starts is not None \
else self.cur_window_starts
window_closes = window_closes if window_closes is not None \
else self.cur_window_closes
window_starts[freq] = freq.normalize(dt)
window_closes[freq] = freq.window_close(window_starts[freq])
return self._create_panel(dt, spec, env=env)
def ensure_spec(self, spec, dt, bar_data):
"""
Ensure that this container has enough space to hold the data for the
given spec. This returns a HistoryContainerDelta to represent the
changes in shape that the container made to support the new
HistorySpec.
"""
updated = {}
if spec.field not in self.fields:
updated['field'] = self._add_field(spec.field)
if spec.frequency not in self.largest_specs:
updated['frequency_delta'] = self._add_frequency(
spec, dt, bar_data,
)
if spec.bar_count > self.largest_specs[spec.frequency].bar_count:
updated['length_delta'] = self._add_length(spec, dt)
return HistoryContainerDelta(**updated)
def add_sids(self, to_add):
"""
Add new sids to the container.
"""
self.sids = pd.Index(
sorted(self.sids.union(_ensure_index(to_add))),
)
self._realign_sids()
def drop_sids(self, to_drop):
"""
Remove sids from the container.
"""
self.sids = pd.Index(
sorted(self.sids.difference(_ensure_index(to_drop))),
)
self._realign_sids()
def _realign_sids(self):
"""
Realign our constituent panels after adding or removing sids.
"""
self.last_known_prior_values = self.last_known_prior_values.reindex(
columns=self.sids,
)
for panel in self.all_panels:
panel.set_minor_axis(self.sids)
def _realign_fields(self):
self.last_known_prior_values = self.last_known_prior_values.reindex(
index=self.prior_values_index,
)
for panel in self.all_panels:
panel.set_items(self.fields)
@with_environment()
def create_digest_panels(self,
initial_sids,
initial_dt,
env=None):
"""
Initialize a RollingPanel for each unique panel frequency being stored
by this container. Each RollingPanel pre-allocates enough storage
space to service the highest bar-count of any history call that it
serves.
"""
# Map from frequency -> first/last minute of the next digest to be
# rolled for that frequency.
first_window_starts = {}
first_window_closes = {}
# Map from frequency -> digest_panels.
panels = {}
for freq, largest_spec in iteritems(self.largest_specs):
if largest_spec.bar_count == 1:
# No need to allocate a digest panel; this frequency will only
# ever use data drawn from self.buffer_panel.
first_window_starts[freq] = freq.normalize(initial_dt)
first_window_closes[freq] = freq.window_close(
first_window_starts[freq]
)
continue
dt = initial_dt
rp = self._create_digest_panel(
dt,
spec=largest_spec,
window_starts=first_window_starts,
window_closes=first_window_closes,
env=env,
)
panels[freq] = rp
return panels, first_window_starts, first_window_closes
def create_buffer_panel(self, initial_dt, bar_data):
"""
Initialize a RollingPanel containing enough minutes to service all our
frequencies.
"""
max_bars_needed = max(
freq.max_bars for freq in self.unique_frequencies
)
freq = '1m' if self.data_frequency == 'minute' else '1d'
spec = HistorySpec(
max_bars_needed + 1, freq, None, None, self.data_frequency,
)
rp = self._create_panel(
initial_dt, spec,
)
self.buffer_spec = spec
if bar_data is not None:
frame = self.frame_from_bardata(bar_data, initial_dt)
rp.add_frame(initial_dt, frame)
return rp
def convert_columns(self, values):
"""
If columns have a specific type you want to enforce, overwrite this
method and return the transformed values.
"""
return values
def digest_bars(self, history_spec, do_ffill):
"""
Get the last (history_spec.bar_count - 1) bars from self.digest_panel
for the requested HistorySpec.
"""
bar_count = history_spec.bar_count
if bar_count == 1:
# slicing with [1 - bar_count:] doesn't work when bar_count == 1,
# so special-casing this.
res = pd.DataFrame(index=[], columns=self.sids, dtype=float)
return res.values, res.index
field = history_spec.field
# Panel axes are (field, dates, sids). We want just the entries for
# the requested field, the last (bar_count - 1) data points, and all
# sids.
digest_panel = self.digest_panels[history_spec.frequency]
frame = digest_panel.get_current(field, raw=True)
if do_ffill:
# Do forward-filling *before* truncating down to the requested
# number of bars. This protects us from losing data if an illiquid
# stock has a gap in its price history.
filled = ffill_digest_frame_from_prior_values(
history_spec.frequency,
history_spec.field,
frame,
self.last_known_prior_values,
raw=True,
)
# Truncate only after we've forward-filled.
indexer = slice(1 - bar_count, None)
return filled[indexer], digest_panel.current_dates()[indexer]
else:
indexer = slice(1 - bar_count, None)
return frame[indexer, :], digest_panel.current_dates()[indexer]
def buffer_panel_minutes(self,
buffer_panel,
earliest_minute=None,
latest_minute=None,
raw=False):
"""
Get the minutes in @buffer_panel between @earliest_minute and
@latest_minute, inclusive.
@buffer_panel can be a RollingPanel or a plain Panel. If a
RollingPanel is supplied, we call `get_current` to extract a Panel
object.
If no value is specified for @earliest_minute, use all the minutes we
have up until @latest_minute.
If no value for @latest_minute is specified, use all values up until
the latest minute.
"""
if isinstance(buffer_panel, RollingPanel):
buffer_panel = buffer_panel.get_current(start=earliest_minute,
end=latest_minute,
raw=raw)
return buffer_panel
# Using .ix here rather than .loc because loc requires that the keys
# are actually in the index, whereas .ix returns all the values between
# earliest_minute and latest_minute, which is what we want.
return buffer_panel.ix[:, earliest_minute:latest_minute, :]
def frame_from_bardata(self, data, algo_dt):
"""
Create a DataFrame from the given BarData and algo dt.
"""
data = data._data
frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan
for j, sid in enumerate(self.sids):
sid_data = data.get(sid)
if not sid_data:
continue
if algo_dt != sid_data['dt']:
continue
for i, field in enumerate(self.fields):
frame_data[i, j] = sid_data.get(field, np.nan)
return pd.DataFrame(
frame_data,
index=self.fields.copy(),
columns=self.sids.copy(),
)
def update(self, data, algo_dt):
"""
Takes @data, the bar for @algo_dt; checks whether we need to roll any
new digests; then adds the new data to the buffer panel.
"""
frame = self.frame_from_bardata(data, algo_dt)
self.update_last_known_values()
self.update_digest_panels(algo_dt, self.buffer_panel)
self.buffer_panel.add_frame(algo_dt, frame)
def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None):
"""
Check whether @algo_dt is greater than cur_window_close for any of our
frequencies. If so, roll a digest for that frequency using data drawn
from @buffer panel and insert it into the appropriate digest panels.
If @freq_filter is specified, only use the given data to update
frequencies on which the filter returns True.
This takes `buffer_panel` as an argument rather than using
self.buffer_panel so that this method can be used to add supplemental
data from an external source.
"""
for frequency in filter(freq_filter, self.unique_frequencies):
# We don't keep a digest panel if we only have a length-1 history
# spec for a given frequency
digest_panel = self.digest_panels.get(frequency, None)
while algo_dt > self.cur_window_closes[frequency]:
earliest_minute = self.cur_window_starts[frequency]
latest_minute = self.cur_window_closes[frequency]
minutes_to_process = self.buffer_panel_minutes(
buffer_panel,
earliest_minute=earliest_minute,
latest_minute=latest_minute,
raw=True
)
if digest_panel is not None:
# Create a digest from minutes_to_process and add it to
# digest_panel.
digest_frame = self.create_new_digest_frame(
minutes_to_process,
self.fields,
self.sids
)
digest_panel.add_frame(
latest_minute,
digest_frame,
self.fields,
self.sids
)
# Update panel start/close for this frequency.
self.cur_window_starts[frequency] = \
frequency.next_window_start(latest_minute)
self.cur_window_closes[frequency] = \
frequency.window_close(self.cur_window_starts[frequency])
def frame_to_series(self, field, frame, columns=None):
"""
Convert a frame with a DatetimeIndex and sid columns into a series with
a sid index, using the aggregator defined by the given field.
"""
if isinstance(frame, pd.DataFrame):
columns = frame.columns
frame = frame.values
if not len(frame):
return pd.Series(
data=(0 if field == 'volume' else np.nan),
index=columns,
).values
if field in ['price', 'close_price']:
# shortcircuit for full last row
vals = frame[-1]
if np.all(~np.isnan(vals)):
return vals
return ffill(frame)[-1]
elif field == 'open_price':
return bfill(frame)[0]
elif field == 'volume':
return np.nansum(frame, axis=0)
elif field == 'high':
return np.nanmax(frame, axis=0)
elif field == 'low':
return np.nanmin(frame, axis=0)
else:
raise ValueError("Unknown field {}".format(field))
def aggregate_ohlcv_panel(self,
fields,
ohlcv_panel,
items=None,
minor_axis=None):
"""
Convert an OHLCV Panel into a DataFrame by aggregating each field's
frame into a Series.
"""
vals = ohlcv_panel
if isinstance(ohlcv_panel, pd.Panel):
vals = ohlcv_panel.values
items = ohlcv_panel.items
minor_axis = ohlcv_panel.minor_axis
data = [
self.frame_to_series(
field,
vals[items.get_loc(field)],
minor_axis
)
for field in fields
]
return np.array(data)
def create_new_digest_frame(self, buffer_minutes, items=None,
minor_axis=None):
"""
Package up minutes in @buffer_minutes into a single digest frame.
"""
return self.aggregate_ohlcv_panel(
self.fields,
buffer_minutes,
items=items,
minor_axis=minor_axis
)
def update_last_known_values(self):
"""
Store the non-NaN values from our oldest frame in each frequency.
"""
ffillable = self.ffillable_fields
if not len(ffillable):
return
for frequency in self.unique_frequencies:
digest_panel = self.digest_panels.get(frequency, None)
if digest_panel:
oldest_known_values = digest_panel.oldest_frame(raw=True)
else:
oldest_known_values = self.buffer_panel.oldest_frame(raw=True)
oldest_vals = oldest_known_values
oldest_columns = self.fields
for field in ffillable:
f_idx = oldest_columns.get_loc(field)
field_vals = oldest_vals[f_idx]
# isnan would be fast, possible to use?
non_nan_sids = np.where(pd.notnull(field_vals))
key = (frequency.freq_str, field)
key_loc = self.last_known_prior_values.index.get_loc(key)
self.last_known_prior_values.values[
key_loc, non_nan_sids
] = field_vals[non_nan_sids]
def get_history(self, history_spec, algo_dt):
"""
Main API used by the algoscript is mapped to this function.
Selects from the overarching history panel the values for the
@history_spec at the given @algo_dt.
"""
field = history_spec.field
do_ffill = history_spec.ffill
# Get our stored values from periods prior to the current period.
digest_frame, index = self.digest_bars(history_spec, do_ffill)
# Get minutes from our buffer panel to build the last row of the
# returned frame.
buffer_panel = self.buffer_panel_minutes(
self.buffer_panel,
earliest_minute=self.cur_window_starts[history_spec.frequency],
raw=True
)
buffer_frame = buffer_panel[self.fields.get_loc(field)]
if do_ffill:
buffer_frame = ffill_buffer_from_prior_values(
history_spec.frequency,
field,
buffer_frame,
digest_frame,
self.last_known_prior_values,
raw=True
)
last_period = self.frame_to_series(field, buffer_frame, self.sids)
return fast_build_history_output(digest_frame,
last_period,
algo_dt,
index=index,
columns=self.sids)
def fast_build_history_output(buffer_frame,
last_period,
algo_dt,
index=None,
columns=None):
"""
Optimized concatenation of DataFrame and Series for use in
HistoryContainer.get_history.
Relies on the fact that the input arrays have compatible shapes.
"""
buffer_values = buffer_frame
if isinstance(buffer_frame, pd.DataFrame):
buffer_values = buffer_frame.values
index = buffer_frame.index
columns = buffer_frame.columns
return pd.DataFrame(
data=np.vstack(
[
buffer_values,
last_period,
]
),
index=fast_append_date_to_index(
index,
pd.Timestamp(algo_dt)
),
columns=columns,
)
def fast_append_date_to_index(index, timestamp):
"""
Append a timestamp to a DatetimeIndex. DatetimeIndex.append does not
appear to work.
"""
return pd.DatetimeIndex(
np.hstack(
[
index.values,
[timestamp.asm8],
]
),
tz='UTC',
)
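# A minimal sketch (toy data, not zipline's real call path) of the two
# helpers above: stack a digest frame with one freshly aggregated row and
# stamp that row with the current algo datetime.
def _demo_fast_build_history_output():
    digest = pd.DataFrame(
        [[1.0, 2.0], [3.0, 4.0]],
        index=pd.DatetimeIndex(['2014-01-02', '2014-01-03'], tz='UTC'),
        columns=[24, 25],
    )
    last_period = np.array([5.0, 6.0])
    # Returns a 3-row frame whose final row is `last_period`, labelled
    # with the algo dt appended via fast_append_date_to_index.
    return fast_build_history_output(
        digest, last_period, pd.Timestamp('2014-01-06', tz='UTC'),
    )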
| apache-2.0 |
giorgiop/scikit-learn | sklearn/feature_extraction/text.py | 13 | 52040 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
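# A quick, hypothetical illustration of the three helpers above: both
# accent strippers map 'élève' to 'eleve', while strip_tags replaces each
# tag with a space rather than deleting it outright.
def _demo_text_preprocessing():
    assert strip_accents_unicode(u'\xe9l\xe8ve') == u'eleve'
    assert strip_accents_ascii(u'\xe9l\xe8ve') == u'eleve'
    assert strip_tags(u'<b>bold</b>') == u' bold '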
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
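    # Example (hypothetical call): with ngram_range=(1, 2), the tokens
    # ['please', 'divide', 'this'] expand to
    # ['please', 'divide', 'this', 'please divide', 'divide this'].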
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
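    # Example (hypothetical call): with ngram_range=(2, 2), the word 'ab'
    # is padded to ' ab ' and yields [' a', 'ab', 'b ']; the surrounding
    # spaces mark the word boundaries.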
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory: scalable to large datasets as there is no need
to store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of string or
bytes items, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
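# A short usage sketch (toy corpus; 2 ** 8 features chosen arbitrarily).
# Because the vectorizer is stateless, transform can be called without
# fitting anything first.
def _demo_hashing_vectorizer():
    corpus = ['the cat sat', 'on the mat']
    vectorizer = HashingVectorizer(n_features=2 ** 8, norm=None)
    # X is a 2 x 256 scipy.sparse matrix of (signed) token counts.
    X = vectorizer.transform(corpus)
    return X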
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
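# Example (hypothetical matrix): for X = sp.csr_matrix([[1, 0], [2, 3]]),
# _document_frequency(X) returns array([2, 1]): column 0 is non-zero in
# both rows, column 1 in only one.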
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of string or
bytes items, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more samples than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
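# Editor's sketch (not part of the original scikit-learn source): the
# reindexing trick used in _limit_features above. np.cumsum(mask) - 1
# maps each kept old column index to its new, compacted index.
def _demo_mask_reindex():
    mask = np.array([True, False, True, True, False])
    new_indices = np.cumsum(mask) - 1   # array([0, 0, 1, 2, 2])
    kept = np.where(mask)[0]            # old columns [0, 2, 3]
    # old 0 -> new 0, old 2 -> new 1, old 3 -> new 2
    return {int(old): int(new_indices[old]) for old in kept}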
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = frombuffer_empty(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
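# Editor's sketch (illustrative only) of the defaultdict idiom used in
# _count_vocab above: with default_factory bound to the dict's own
# __len__, each unseen term is assigned the next free integer index.
def _demo_auto_vocabulary():
    vocabulary = defaultdict()
    vocabulary.default_factory = vocabulary.__len__
    for term in ["cat", "dog", "cat", "fish"]:
        vocabulary[term]    # inserts cat -> 0, dog -> 1, fish -> 2
    return dict(vocabulary)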
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
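# Editor's usage sketch (not in the original file): fit the
# CountVectorizer defined above on a toy corpus and inspect the result.
def _demo_count_vectorizer():
    corpus = ["the cat sat", "the cat sat on the mat"]
    vec = CountVectorizer()
    X = vec.fit_transform(corpus)   # sparse (2, 5) document-term matrix
    # vocabulary_ maps terms to alphabetically sorted column indices:
    # {'cat': 0, 'mat': 1, 'on': 2, 'sat': 3, 'the': 4}
    return vec.vocabulary_, X.toarray()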
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# in-place *= is not supported for scipy sparse matrices
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
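# Editor's worked example (a sketch) of the smoothed idf documented
# above: idf(t) = log((1 + n) / (1 + df(t))) + 1, so a term present in
# every one of n documents gets idf = log(1) + 1 = 1.
def _demo_smooth_idf():
    n = 4                        # documents in the corpus
    df = np.array([4., 2., 1.])  # document frequencies of three terms
    return np.log((1.0 + n) / (1.0 + df)) + 1.0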
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items of
type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents, count the occurrences of tokens and return
them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
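# Editor's end-to-end sketch (illustrative): TfidfVectorizer behaves as
# CountVectorizer followed by TfidfTransformer; with the default
# norm='l2' every output row has unit Euclidean norm.
def _demo_tfidf_vectorizer():
    corpus = ["the cat sat", "the dog barked"]
    vec = TfidfVectorizer()
    X = vec.fit_transform(corpus)
    row_norms = np.sqrt(np.asarray(X.multiply(X).sum(axis=1))).ravel()
    return vec.get_feature_names(), row_norms  # norms ~ [1.0, 1.0]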
| bsd-3-clause |
jskDr/jamespy_py3 | medic/kdl.py | 1 | 53787 | """
KDL - deep learning for medic
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import convolve2d, fftconvolve
from sklearn import preprocessing, model_selection, metrics
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, Conv2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras import backend as K
from keras import callbacks
import kkeras
from . import beads
def fig2array(fig):
fig.canvas.draw()
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# ones_255 = np.ones_like( data) * 255
# data = 255 - data
return data
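# Editor's usage sketch: render a small black figure with one white
# circle and grab its RGB pixel buffer via fig2array above.
def _demo_fig2array():
    fig, ax = plt.subplots(figsize=(2, 2))
    fig.patch.set_facecolor('black')
    ax.add_artist(plt.Circle((0.5, 0.5), 0.1, color='w'))
    plt.axis('off')
    data = fig2array(fig)   # uint8 array of shape (H, W, 3)
    plt.close(fig)
    return data.shape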
def _gen_cell_r0(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r1(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
# print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r2(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_db_r0(N=5, rand_pos_cell=False, disp=False):
db_l = []
cell_img_org = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
for i in range(N):
if disp: # 1, 2, True (not 0 or False)
print('Iteration:', i)
elif disp == 2:
print(i, end=",")
if rand_pos_cell:
cell_img = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(bd_on=True, rand_pos_cell=rand_pos_cell)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
print("The end.")
return db_l
def gen_cell_db(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd = {'mean': 5, 'std': 1}
else:
stat_ext_bd = None
db_l = []
cell_img_org = gen_cell(bd_on=False,
rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
for i in range(N):
if disp:
print(i, end=",")
if rand_pos_cell:
cell_img = gen_cell(
bd_on=False, rand_pos_cell=rand_pos_cell,
max_bd=max_bd,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(
bd_on=True, rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db(db_l, fname_gz="sheet.gz/cell_db.cvs.gz"):
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["celltype"] = celltype
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
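# Editor's round-trip sketch (assumes a file written by save_cell_db
# above): load the gzipped CSV back and restore one image as a 2D array.
def _demo_load_cell_db(fname_gz="sheet.gz/cell_db.cvs.gz", img_id=0):
    cell_df = pd.read_csv(fname_gz)
    one = cell_df[cell_df["ID"] == img_id]
    Lx, Ly = one["x"].max() + 1, one["y"].max() + 1
    return one["image"].values.reshape(Lx, Ly)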
# ===================================
# Functions for the Center_Cell mode
# - gen_cell_n_beads,
# gen_cell_db_center_cell,
# save_cell_db_center_cell
# ===================================
def gen_cell(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
return gen_cell_n_beads(bd_on=bd_on,
rand_pos_cell=rand_pos_cell,
r_cell=r_cell, # 0<r_cell<=1
r_bd=r_bd, # 0<r_bd<=1
max_bd=max_bd, # max_bd >= 1
rand_bead_flag=True, # This is the only changed part.
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd=stat_ext_bd,
bound_flag=bound_flag,
visible=visible,
disp=disp,
fig=fig,
ax=ax)
def gen_cell_n_beads(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean': 2, 'std': 2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely,
but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd) + 1
else:
# Now, the number of total beads attached to a cell is fixed (not random).
final_max_bd = max_bd
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = \
plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = \
plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
class CELL():
def __init__(self, flag_no_overlap_beads=False):
self.flag_no_overlap_beads = flag_no_overlap_beads
def gen(self,
bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean': 2, 'std': 2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def get_pos_bd(th):
return pos_cell + (r_cell + r_bd) * np.array((np.cos(th), np.sin(th)))
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = get_pos_bd(th)
return pos_bd
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd) + 1
else:
# Now, the number of total beads attached
# to a cell is fixed (not random).
final_max_bd = max_bd
if not self.flag_no_overlap_beads:
rand_pos_bd_l = []
for bd_n in range(final_max_bd):
rand_pos_bd_l.append(rand_pos_bd())
else:
bead_center_l = []
cnt = 0
while len(bead_center_l) < final_max_bd:
# generate beads until their number reaches the limit
bead_center_l = beads.BEADS(r_cell, r_bd).gen_bead_centers(final_max_bd)
cnt += 1
assert cnt < 100, 'Try to reduce the number of beads!'
rand_pos_bd_l = [get_pos_bd(th/180*np.pi) for th in bead_center_l]
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = \
plt.Circle(rand_pos_bd_l[bd_n], r_bd, color='w')
if stat_ext_bd is not None:
n_ext_bd = np.random.randint(stat_ext_bd['mean'] + 1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = \
plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def gen_cell_n_nooverlap_beads(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean': 2, 'std': 2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely,
but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Do not generate cells at the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd) + 1
else:
# Now, the number of total beads attached to a cell is fixed (not random).
final_max_bd = max_bd
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = \
plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = \
plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def gen_cell_db_center_cell(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
flag_no_overlap_beads=False,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
cellgen = CELL(flag_no_overlap_beads=flag_no_overlap_beads)
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd = {'mean': 5, 'std': 1}
else:
stat_ext_bd = None
db_l = []
for i in range(N):
if disp:
print(i, end=",")
# n_beads cycles from 0 to max_bd-1,
# hence cell generation with zero beads must be supported.
n_beads = i % max_bd
cellbd_img = cellgen.gen(bd_on=True,
rand_pos_cell=rand_pos_cell,
# max_bd is set to n_beads, which cycles from 0 to max_bd-1
max_bd=n_beads,
rand_bead_flag=False,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db_center_cell(db_l, max_bd, fname_gz="sheet.gz/cell_db.cvs.gz"):
"""
Each image includes a cell at the center location, and
the number of beads per cell is distributed equally in a
cyclic fashion. That is, 0 beads, 1 bead, ..., max_bd-1 beads
are repeated across all images.
"""
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["n_beads"] = [i % max_bd] * np.prod(db.shape)
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
def _gen_save_cell_db_r0(N=5, fname_gz="sheet.gz/cell_db.cvs.gz",
extra_bead_on=True, rand_pos_cell=False,
max_bd=3,
classification_mode="Cancer_Normal_Cell",
disp=False):
"""
- Image display without pausing is needed. (Oct 31, 2016)
Parameters
==========
rand_pos_cell, Default=False
If it is True, the position of the cell is varied randomly.
Otherwise, the position is fixed at the center.
max_bd, int, default=3
The number of the maximum beads attached to a cell.
classification_mode, string, default="Cancer_Normal_Cell"
If it is "Cancer_Normal_Cell", this function classifies cancer or normal cells.
If it is "Center_Cell", this function classifies the number of beads in each cell.
In this case, the number of beads per cell is equally distributed
from 0 to max_bd-1. For example, if N=100 & max_bd=4, 0-beads,
1-beads, 2-beads and 3-beads cell images are repeated 25 times.
"""
def save(save_fn, db_l, max_bd=None, fname_gz=None):
fname_gz_fold, fname_gz_file = os.path.split(fname_gz)
os.makedirs(fname_gz_fold, exist_ok=True)
if max_bd is None:
cell_df = save_fn(db_l, fname_gz=fname_gz)
else:
cell_df = save_fn(db_l, max_bd, fname_gz=fname_gz)
return cell_df
if classification_mode == "Cancer_Normal_Cell":
db_l = gen_cell_db(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
# classification_mode=classification_mode,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db, db_l, fname_gz=fname_gz)
elif classification_mode == "Center_Cell":
assert int(N % max_bd) == 0, "N % max_bd should be zero in the Center_Cell mode"
db_l = gen_cell_db_center_cell(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db_center_cell, db_l, max_bd,
fname_gz=fname_gz)
else:
raise ValueError("classification_mode = {} is not supported.".format(classification_mode))
return cell_df
def gen_save_cell_db(N=5, fname_gz="sheet.gz/cell_db.cvs.gz",
extra_bead_on=True, rand_pos_cell=False,
max_bd=3,
classification_mode="Cancer_Normal_Cell",
flag_no_overlap_beads=False,
disp=False):
"""
- Image display without pausing is needed. (Oct 31, 2016)
Parameters
==========
rand_pos_cell, Default=False
If it is True, the position of the cell is varied randomly.
Otherwise, the position is fixed at the center.
max_bd, int, default=3
The number of the maximum beads attached to a cell.
classification_mode, string, default="Cancer_Normal_Cell"
If it is "Cancer_Normal_Cell", this function classifies cancer or normal cells.
If it is "Center_Cell", this function classifies the number of beads in each cell.
In this case, the number of beads per cell is equally distributed
from 0 to max_bd-1. For example, if N=100 & max_bd=4, 0-beads,
1-beads, 2-beads and 3-beads cell images are repeated 25 times.
"""
def save(save_fn, db_l, max_bd=None, fname_gz=None):
fname_gz_fold, fname_gz_file = os.path.split(fname_gz)
os.makedirs(fname_gz_fold, exist_ok=True)
if max_bd is None:
cell_df = save_fn(db_l, fname_gz=fname_gz)
else:
cell_df = save_fn(db_l, max_bd, fname_gz=fname_gz)
return cell_df
if classification_mode == "Cancer_Normal_Cell":
db_l = gen_cell_db(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db, db_l, fname_gz=fname_gz)
elif classification_mode == "Center_Cell":
assert int(N % max_bd) == 0, "N % max_bd should be zero in the Center_Cell mode"
db_l = gen_cell_db_center_cell(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
flag_no_overlap_beads=flag_no_overlap_beads,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db_center_cell, db_l, max_bd,
fname_gz=fname_gz)
else:
raise ValueError("classification_mode = {} is not supported.".format(classification_mode))
return cell_df
def gen_save_cell_db_no_overlap(N_each, max_bd_p1 = 4, disp=2):
"""
N_each is the number of cell images for each bead case.
max_bd_p1 is max_bd plus 1.
"""
N = N_each * max_bd_p1
max_bd = max_bd_p1 - 1
fname_gz="sheet.gz/cell_db{0}_center_cell_{1}_nooverlap.cvs.gz".format(N, max_bd)
print("Output file name is", fname_gz)
return gen_save_cell_db(N = N, fname_gz=fname_gz,
rand_pos_cell=False, max_bd=max_bd_p1,
classification_mode="Center_Cell",
extra_bead_on=False,
flag_no_overlap_beads=True,
disp=disp)
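# Editor's example call (a sketch; the output folder ./sheet.gz is
# created by gen_save_cell_db if missing): 25 images per bead count
# times 4 bead counts (0..3) gives N = 100 images in total.
def _demo_gen_no_overlap_db():
    return gen_save_cell_db_no_overlap(N_each=25, max_bd_p1=4, disp=2)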
class obj:
def __init__(self, r, L=144):
"""
The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies in a larger variance,
but the mean value is around 9-12 um.
"""
# Initial values
self.Lx, self.Ly = L, L
self.downsamples = 4
self.d_um = 2.2 / self.downsamples
# Input and generated values
self.r = r
self.r_pixels_x = self.r * self.Lx
self.r_pixels_y = self.r * self.Ly
self.r_x_um = self.r_pixels_x * self.d_um
self.r_y_um = self.r_pixels_y * self.d_um
def get_h2d(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
2D Fresnel diffraction kernel
Input
=====
nx, ny, int
kernel size in pixels
z, float
propagation distance (height)
l, float
lambda (wavelength)
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
x = np.dot(np.ones((nx, 1)), x_vec)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
y = y_vec * np.ones((1, ny))
#k = 2.0 * np.pi / l
return np.exp(1j * k * z) / (1j * l * z) * np.exp((1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
def get_h2d_inv(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
2D inverse Fresnel diffraction kernel
Input
=====
nx, ny, int
kernel size in pixels
z, float
propagation distance (height)
l, float
lambda (wavelength)
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
x = np.dot(np.ones((nx, 1)), x_vec)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
y = y_vec * np.ones((1, ny))
#k = 2.0 * np.pi / l
return np.exp(-1j * k * z) / (1j * l * z) * np.exp((-1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
# Fresnel
def get_h(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
2D Fresnel diffraction kernel
Input
=====
ny, nx, int
kernel size in pixels
z_mm, float
propagation distance (mm)
l_nm, float
lambda (wavelength, nm)
"The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um."
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
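# Editor's sketch showing how get_h above is used downstream: build the
# quadratic-phase Fresnel kernel and FFT-convolve it with a point
# source, as cell_fd_conv does for whole cell images later on.
def _demo_fresnel_point_source(n=144):
    img = np.zeros((n, n))
    img[n // 2, n // 2] = 1.0                  # point source
    h = get_h(n, n, z_mm=0.5, dx_um=2.2 / 4, dy_um=2.2 / 4, l_nm=405)
    field = fftconvolve(img, h, mode='same')   # complex diffracted field
    return np.abs(field)                       # magnitude at the sensor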
def get_h_inv(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
2D inverse Fresnel diffraction kernel
Input
=====
ny, nx, int
kernel size in pixels
z_mm, float
propagation distance (mm)
l_nm, float
lambda (wavelength, nm)
"The PS bead size is 6 um and the silica bead size is 5 um.
Lymphoma cell size varies more widely, but the mean value is around 9-12 um."
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((-1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
def fd_conv(Img_xy, h2d, mode='same'):
#return convolve2d(Img_xy, h2d, mode=mode)
return fftconvolve(Img_xy, h2d, mode=mode)
def cell_fd_info(cell_df):
Lx = cell_df['x'].max() + 1
Ly = cell_df['y'].max() + 1
Limg = cell_df['ID'].max() + 1
#print( Lx, Ly, Limg)
return Limg, Lx, Ly
def cell_fd_conv(cell_df, h144=None):
Limg, Lx, Ly = cell_fd_info(cell_df)
if h144 is None:
h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)
cell_img_fd_l = []
for l in range(Limg):
cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
#cell_img_fd = fd_conv(cell_img, h144)
cell_img_fd = fftconvolve(cell_img, h144, mode='same')
cell_img_fd_l.append(cell_img_fd)
cell_img_fd_a = np.array(cell_img_fd_l)
#print( cell_img_fd_a.shape)
return cell_img_fd_a
def cell_fd_extention(fname_org='sheet.gz/cell_db.cvs.gz', camera_bit_resolution=14):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
def cell_fd_ext_save(fname_org='sheet.gz/cell_db100.cvs.gz',
fname_ext='sheet.gz/cell_fd_db100.cvs.gz'):
cell_df_ext = cell_fd_extention(fname_org)
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
class CELL_FD_EXT():
def __init__(self, fname_org, h2d=None, h2d_inv=None):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
if h2d is None:
h2d = get_h(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
if h2d_inv is None:
h2d_inv = get_h_inv(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
self.fname_org = fname_org
self.h2d = h2d
self.h2d_inv = h2d_inv
def save(self):
fname_org = self.fname_org
fname_ext = fname_org[:-7] + '_fd' + fname_org[-7:]
print('fname_ext is', fname_ext)
cell_df_ext = self.extention()
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
def extention(self, camera_bit_resolution=14):
fname_org = self.fname_org
h2d = self.h2d
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df, h2d)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
#Deep Learning
def run_dl_mgh_params_1cl_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test),
callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
# Deep Learning
def run_dl_mgh_params_1cl_bn(X, y, Lx, Ly, nb_epoch=5000,
batch_size=128,
nb_classes=2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Conv2D(nb_filters, kernel_size,
padding='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
run_dl_mgh_params = run_dl_mgh_params_1cl_bn
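# Editor's hedged usage sketch (assumes a cancer/normal DataFrame built
# by gen_save_cell_db above, with one pixel per row and a per-image
# 'celltype' label): flatten images into rows of X, then train.
def _demo_run_dl(cell_df, nb_epoch=10):
    Limg, Lx, Ly = cell_fd_info(cell_df)
    X = cell_df["image"].values.reshape(Limg, Lx * Ly).astype('float32')
    y = cell_df.groupby("ID")["celltype"].first().values
    run_dl_mgh_params(X, y, Lx, Ly, nb_epoch=nb_epoch)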
def run_dl_mgh_params_1cl_bn_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
def run_dl_mgh_params_2cl_bn(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Convolution2D(5, 5, 5, border_mode='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
# function name alias
run_dl_mgh_params_2cl = run_dl_mgh_params_2cl_bn
def run_dl_mgh_params_2cl_bn_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size=128,
nb_classes=2):
"""
Dropout is also included after batchnormalization to protect
overfitting.
"""
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.1))
model.add(Convolution2D(5, 5, 5, border_mode='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
print('Confusion matrix')
# y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
y_test_pred = np.argmax(Y_test_pred, axis=1)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc(history)
plt.show()
kkeras.plot_loss(history)
run_dl_mgh_params_2cl_do = run_dl_mgh_params_2cl_bn_do
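# A minimal usage sketch (illustrative, not part of the original module),
# assuming X is a stack of flattened Lx-by-Ly images with values in 0..255
# and y holds integer class labels. Note that the 20x20 convolution and
# 10x10 pooling above require roughly Lx, Ly >= 109 for the two-conv-layer
# variants:
#
#     X_demo = np.random.randint(0, 256, size=(100, 120 * 120))
#     y_demo = np.random.randint(0, 2, size=100)
#     run_dl_mgh_params_2cl(X_demo, y_demo, Lx=120, Ly=120, nb_epoch=3)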
"""
Fresenel Diffraction with a new approach
"""
def f(x_um, y_um, z_mm=0.5, l_nm=405):
return np.exp(1j * np.pi * (np.power(x_um, 2) + np.power(y_um,2)) / (l_nm * z_mm))
def cimshow(f_impulse):
plt.figure(figsize=(7,5))
plt.subplot(2,2,1)
plt.imshow(np.real(f_impulse))
plt.colorbar()
plt.title('Re{}')
plt.subplot(2,2,2)
plt.imshow(np.imag(f_impulse))
plt.colorbar()
plt.title('Img{}')
plt.subplot(2,2,2+1)
plt.imshow(np.abs(f_impulse))
plt.colorbar()
plt.title('Magnitude')
plt.subplot(2,2,2+2)
plt.imshow(np.angle(f_impulse))
plt.colorbar()
plt.title('Phase')
def xy(MAX_x_um = 55, pixel_um=2.2, oversample_rate=4):
N = int(MAX_x_um / (pixel_um / oversample_rate))
x = np.dot(np.ones((N,1)), np.linspace(-MAX_x_um,MAX_x_um,N).reshape(1,-1))
y = np.dot(np.linspace(-MAX_x_um,MAX_x_um,N).reshape(-1,1), np.ones((1,N)))
return x, y
def u(x, y, alpha):
out = np.zeros_like(x)
out[(y>=-alpha/2)&(y<=alpha/2)&(x>=-alpha/2)&(x<=alpha/2)] = 1.0
return out
def u_circle(x,y,radius):
xy2 = np.power(x,2) + np.power(y,2)
# Since x is already a 2-D grid matrix, the shape of out is copied from x.
# If x were a vector, out would need to be reshaped to 2-D form.
out = np.zeros_like(x)
out[xy2<=np.power(radius,2)] = 1.0
return out
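# Illustrative usage (not in the original source): build a circular aperture
# on the oversampled grid and multiply by the Fresnel kernel f defined above.
#
#     x, y = xy(MAX_x_um=55, pixel_um=2.2, oversample_rate=4)
#     aperture = u_circle(x, y, radius=25.0)
#     field = aperture * f(x, y, z_mm=0.5, l_nm=405)
#     cimshow(field)  # shows Re, Im, magnitude and phase panels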
# Code for generating H in the frequency domain: H <--> h
# Gbp(n,m) = exp(1i*k*Dz*sqrt(1-lambda^2*fx(n,m)^2-lambda^2*fy(n,m)^2))
class GenG():
def upsampling(self, Pow2factor, dx1):
"""
Utility: divide the sampling interval dx1 by 2**Pow2factor.
"""
dx2 = dx1 / (2**Pow2factor)
return dx2
def __init__(self, NxNy=(144, 144), Dz_mm=0.5, delta_um = 2.2, UpsampleFactor=2, lambda_nm=405):
"""
oversample=2^UpsampleFactor
"""
delta_m = delta_um * 1e-6
delta2_m = self.upsampling(UpsampleFactor, delta_m)
Nx, Ny = NxNy
dfx = 1/(Nx*delta2_m)
dfy = 1/(Ny*delta2_m)
x = np.arange(-Ny/2, Ny/2)*dfy
y = np.arange(-Nx/2, Nx/2)*dfx
self.xv, self.yv = np.meshgrid(x, y)
self.lambda_m = lambda_nm * 1e-9
self.k_rad = 2*np.pi/self.lambda_m
self.Dz_m = Dz_mm * 1e-3
def bp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2)))
def fp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(-1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2))) | mit |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/dates.py | 4 | 42875 | #!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, eg MO, TU
* :class:`MonthLocator`: locate months, eg 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<http://labix.org/python-dateutil>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import print_function
import re
import time
import math
import datetime
from itertools import izip
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60. * HOURS_PER_DAY
SECONDS_PER_DAY = 60. * MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6 * SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour / HOURS_PER_DAY + dt.minute / MINUTES_PER_DAY +
dt.second / SECONDS_PER_DAY +
dt.microsecond / MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6 - microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
return np.asarray([_to_ordinalf(val) for val in d])
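# Illustrative round trip (not in the original source), matching the module
# docstring: 0001-01-01 06:00 maps to 1.25.
#
#     >>> import datetime
#     >>> date2num(datetime.datetime(1, 1, 1, 6, 0))
#     1.25
#     >>> num2date(1.25)  # back to a timezone-aware datetime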
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j):
j = np.asarray(j)
return j - 1721424.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n):
n = np.asarray(n)
return n + 1721424.5
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds / SECONDS_PER_DAY +
delta.microseconds / MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
# calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
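# Illustrative example (not in the original source): 6-hourly points over a
# half-open three-day range [dstart, dend).
#
#     >>> import datetime
#     >>> dstart = datetime.datetime(2006, 4, 1)
#     >>> delta = datetime.timedelta(hours=6)
#     >>> drange(dstart, dstart + datetime.timedelta(days=3), delta).size
#     12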
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, eg with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except across century years that are not leap
# years (i.e. centuries not divisible by 400). But only if you're
# using the Gregorian calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = self._findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return cbook.unicode_safe(s)
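# Illustrative example (not in the original source): the 28-year-cycle
# workaround above lets pre-1900 dates be formatted.
#
#     >>> df = DateFormatter('%Y-%m-%d')
#     >>> df.strftime(datetime.datetime(1850, 7, 4), '%Y-%m-%d')
#     '1850-07-04'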
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between major ticks) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
formatter = AutoDateFormatter()
formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autofmt the date labels. The default format is the one to use
if none of the times in scaled match
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {365.0: '%Y',
30.: '%b %Y',
1.0: '%b %d %Y',
1. / 24.: '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f'}
def __call__(self, x, pos=0):
scale = float(self._locator._get_unit())
fmt = self.defaultfmt
for k in sorted(self.scaled):
if k >= scale:
fmt = self.scaled[k]
break
self._formatter = DateFormatter(fmt, self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((dmax, dmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, dmin, dmax,
self.MAXTICKS * 2))
dates = self.rule.between(dmin, dmax, True)
if len(dates) == 0:
return date2num([dmin, dmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if (freq == YEARLY):
return 365.0
elif (freq == MONTHLY):
return 30.0
elif (freq == WEEKLY):
return 7.0
elif (freq == DAILY):
return 1.0
elif (freq == HOURLY):
return (1.0 / 24.0)
elif (freq == MINUTELY):
return (1.0 / (24 * 60))
elif (freq == SECONDLY):
return (1.0 / (24 * 3600))
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) to the
multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(izip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32), range(0, 24),
range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But by default, expand a singular date range to a ~4 year period.
if vmin == vmax:
vmin = vmin - 365 * 2
vmax = vmax + 365 * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numMicroseconds = (numSeconds * 1e6) + delta.microseconds
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(izip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiples of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, eg 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self, *args, **kwargs):
vmin, vmax = self.axis.get_view_interval()
vmin *= MUSECONDS_PER_DAY
vmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(vmin, vmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2 - d1
mus = abs(delta.days * MUSECONDS_PER_DAY + delta.seconds * 1e6 +
delta.microseconds)
assert(mus < epsilon)
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert(delta < epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24. * 3600.
return 719163 + np.asarray(e) / spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24. * 3600.
return (np.asarray(d) - 719163) * spd
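# Illustrative example (not in the original source): the Unix epoch maps to
# 1970-01-01 in this scheme.
#
#     >>> epoch2num(0.0)
#     719163.0
#     >>> num2date(epoch2num(0.0))  # 1970-01-01 00:00 in the rc timezone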
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / 24.
minutes = span * 24 * 60
hours = span * 24
days = span
weeks = span / 7.
months = span / 31. # approx
years = span / 365.
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz)
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hours > numticks:
locator = HourLocator(interval=int(math.ceil(hours / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes > numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
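# Illustrative example (not in the original source): a ~90-day span yields
# weekday ticks.
#
#     >>> loc, fmt = date_ticker_factory(90)
#     >>> type(loc).__name__, fmt.fmt
#     ('WeekdayLocator', '%a, %b %d')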
def seconds(s):
'Return seconds as days.'
return float(s) / SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m) / MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h / 24.
def weeks(w):
'Return weeks as days.'
return w * 7.
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
'Return the tzinfo instance of *x* or of its first element, or None'
try:
x = x[0]
except (TypeError, IndexError):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
| mit |
mjgrav2001/scikit-learn | sklearn/linear_model/coordinate_descent.py | 37 | 74167 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. For
``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices,
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
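# Illustrative example (not in the original source): the grid runs from
# alpha_max down to eps * alpha_max on a log scale, so the first and last
# values differ by a factor of ~1 / eps.
#
#     >>> X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
#     >>> y = np.array([1., 2., 3.1])
#     >>> grid = _alpha_grid(X, y, n_alphas=4)
#     >>> grid[0] / grid[-1]  # ~ 1.0 / eps == 1000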
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster at implementing this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize or fit an intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
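# Illustrative example (not in the original source): a short elastic net path
# on the toy data from the lasso_path docstring above.
#
#     >>> X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
#     >>> y = np.array([1., 2., 3.1])
#     >>> alphas, coefs, gaps = enet_path(X, y, l1_ratio=0.8, n_alphas=5)
#     >>> coefs.shape  # (n_features, n_alphas)
#     (2, 5)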
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
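    Examples
    --------
    A minimal usage sketch; fitted values depend on the data, so the doctest
    lines are skipped rather than showing invented output:
    >>> from sklearn import linear_model
    >>> clf = linear_model.ElasticNet(alpha=0.1, l1_ratio=0.7)
    >>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])  # doctest: +SKIP
    >>> clf.predict([[1.5, 1.5]])                           # doctest: +SKIP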
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
        ----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
    [ 0.85  0.  ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                " passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
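    Examples
    --------
    A minimal sketch; here ``X`` and ``y`` stand for any training data, and
    the selected ``alpha_`` depends on the data and the folds, so the
    doctest lines are skipped:
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoCV(cv=3).fit(X, y)  # doctest: +SKIP
    >>> clf.alpha_                                  # doctest: +SKIP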
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
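    Examples
    --------
    A minimal sketch; ``X`` and ``y`` stand for any training data. The
    l1_ratio grid below follows the advice given above, and the selected
    values are data-dependent, so the doctest lines are skipped:
    >>> from sklearn import linear_model
    >>> clf = linear_model.ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95,
    ...                                           .99, 1])
    >>> clf = clf.fit(X, y)          # doctest: +SKIP
    >>> (clf.alpha_, clf.l1_ratio_)  # doctest: +SKIP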
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
>>> print(clf.intercept_)
    [ 0.0872422  0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
        ----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
        i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
>>> print(clf.intercept_)
    [ 0.10606602  0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
>>> print(clf.intercept_)
    [ 0.00166409  0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
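    Examples
    --------
    A minimal sketch; ``X`` has shape (n_samples, n_features) and ``Y`` has
    shape (n_samples, n_tasks). The selected ``alpha_`` is data-dependent,
    so the doctest lines are skipped:
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLassoCV(cv=3).fit(X, Y)  # doctest: +SKIP
    >>> clf.alpha_                                           # doctest: +SKIP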
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
pyrrho314/recipesystem | trunk/gempy/adlibrary/extract.py | 1 | 37277 | #! /usr/bin/env python
import sys, os
import time
import numpy as np
from matplotlib import pyplot as pl
import pyfits as pf
from astrodata import AstroData, new_pyfits_version
from astrodata import Lookups
from gempy.library import gfit
from gempy.adlibrary import segmentation as seg
# Load the timestamp keyword dictionary.
timestamp_keys = Lookups.get_lookup_table("Gemini/timestamp_keywords",
"timestamp_keys")
def print_timing(func):
def wrapper(*arg,**kargs):
t1 = time.time()
res = func(*arg,**kargs)
t2 = time.time()
print '%s took %0.3fs' % (func.func_name, (t2-t1))
return res
return wrapper
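# Illustrative use of the decorator above ('my_step' is a hypothetical name):
#
#     @print_timing
#     def my_step(ad):
#         ...            # any slow processing step
#
# Calling my_step(ad) then prints, e.g., "my_step took 0.123s".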
def trace_footprints(ad, function='polynomial', order=2,
trace_threshold=1., debug=False):
"""
    This function finds the footprint edges of spectroscopic flats, creates a
    BINTABLE extension with the footprint parameters, and appends it to the output
AstroData object.
:param ad: Input Astrodata object.
:param function: Name of the fitting function to use.
:type function: Default is 'polynomial'.
:param order: Degree of the polynomial.
:type order: Default is 2
    :param trace_threshold: Threshold in units of sigma to apply to the
filtered image.
:type trace_threshold: Default is 1.
:param debug: For debugging purposes.
:return adoutput: Output Astrodata object containing the input ad object plus
the TRACEFP binary table.
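    Example (an illustrative sketch; the flat file name is hypothetical)::
        ad = AstroData('S20100113S0110_flat.fits')
        adout = trace_footprints(ad, function='polynomial', order=2)
        # 'adout' now carries a 'TRACEFP' BINTABLE extension.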
"""
#try:
# Find edges in the image, pair them in spectrum edges.
footprints = find_footprints(ad, function=function, order=order,
trace_threshold=trace_threshold,debug=debug)
# Create a FootprintTrace object.
ft = FootprintTrace(footprints)
# Use the footprint information to prepare BINTABLE format.
# tb_adout is of type AD
tb_adout = ft.as_bintable()
# Append to the input AD object
ad.append(tb_adout)
#except:
# raise SystemError(repr(sys.exc_info()[1]))
return ad
def find_footprints(ad, function='polynomial', order=2, trace_threshold=1.,
debug=False):
"""
Function to find footprint edges in an image that has
been processed with the Sobel convolution kernel.
These are the steps to find the footprint:
1) Use the edge_detector_data() function to read the input data
and MDF metadata from the AstroData object.
2) The function in 1) returns an <instrument>_EdgeDetector
subclass object.
3) Use this class method find_edges() returning two lists of lists,
one is the list of (x,y) coordinates for all the left/bottom edges
in the image and the other is the list of all the right/top edges
in each spectrum.
::
Input
ad: AstroData object.
function: Name of the fitting function to use.
order: Degree of the polynomial.
trace_threshold:
            Threshold in units of sigma to apply to the
filtered image.
Output: List of Footprint objects where each object contains:
id: Integer reference number for footprint
region: Section of the image where the footprint solution is
valid, (x1, x2, y1, y2)
edges: Tuple of Edge object (edge_1,edge_2) defining the
long edges of the footprint.
width: Average width of the footprint (calculated with the edges)
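      Example (an illustrative sketch)::
          footprints = find_footprints(ad, trace_threshold=1.)
          for fp in footprints:
              print fp.id, fp.region, fp.width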
"""
    # After setting metadata, instantiate an EdgeDetector
    # object; the returned object is the subclass
    # instance appropriate for the 'ad' object.
edt= seg.EdgeDetector(ad)
# Get a list of lists with (x,y) for each edge.
(xylist_1,xylist_2) = edt.find_edges()
# --- Create 2 lists of Edge objects. One (edge_1)
# has the left/bottom edge of the footprints and
# edge_2 has the right/top edge of the footprints.
    # get orientation in degrees
orientation = [0, 90][edt.axis]
    # initialize list of left/bottom (edge_1) edges and
# right/top (edge_2) edges.
edges_1 = []
edges_2 = []
# the lists xylist_1 and _2 are list of lists:
# xylist_ = [[] for _ in range(len(reference_edges))]
k=0
for xy_1,xy_2 in zip(xylist_1,xylist_2):
low = edt.footprint_low[k]
high = edt.footprint_high[k]
# For the current footprint (has 2 edges)
# get the list of x's and y's.
xx1 = np.asarray([x for x,y in xy_1])
yy1 = np.asarray([y for x,y in xy_1])
xx2 = np.asarray([x for x,y in xy_2])
yy2 = np.asarray([y for x,y in xy_2])
# For vertical edges, the yy1,yy2 are the independent
# variable arrays. Instantiate the Edge class.
# Instantiates left and right Edge objects
ed1 = seg.Edge(xx1,yy1)
ed2 = seg.Edge(xx2,yy2)
ed1.orientation = orientation
ed2.orientation = orientation
# Set the low and high location in the dispersion
# direction. Fit edges.
set_dispersion_limits(low,high,ed1,ed2,function,order)
k += 1
# add to the current lists of edges.
edges_1.append(ed1)
edges_2.append(ed2)
# --- Now that we have both footprint edges, make a list of
# Footprint objects.
footprints = []
sn = 1
for ed1,ed2 in zip(edges_1,edges_2):
footprint = Footprint(sn, ed1, ed2)
footprints.append(footprint)
sn += 1
if debug:
_plot_footprints(edt.image, footprints)
return footprints
def set_dispersion_limits(low,high,ed1,ed2,function,order):
""" Dispersion limits (low,high) are the minimum and
maximum value in the dispersion direction. They
are calculated using the instrument parameters
like gratings and filters plus tables in
dictionaries and others. Please see footprint_len()
for instrument specific details.
If low and high are defined, set them as values in the
Edge.trace (xx,yy) members and recalculate the
fit function.
"""
# Change the function and/or order if they are not the defaults.
    if (function != 'polynomial') | (order != 2):
        # Note: only the two Edge objects passed in are visible here;
        # apply the new function/order to each of them.
        for ed in (ed1, ed2):
            ed.setfunction(function)
            ed.setorder(order)
for ed in [ed1,ed2]:
xx,yy = ed.trace
# fit Edges
ed.fitfunction()
        # Make sure we have points in the range (low,high) in dispersion
if ed.orientation == 90:
g = np.where((yy >= low) & (yy <= high))
xx,yy = (xx[g],yy[g])
# Evaluate the spatial coordinate low and high
xx[0],xx[-1] = ed.evalfunction([low,high])
yy[0],yy[-1] = (low,high)
else:
g = np.where((xx >= low) & (xx <= high))
# Evaluate the spatial coordinate low and high
xx[0],xx[-1] = (low,high)
yy[0],yy[-1] = ed.evalfunction([low,high])
# Reset the trace attribute with these update coordinates.
ed.trace = (xx,yy)
# Now that we have good extreme points, fit again to set
# xlim,ylim as well.
ed.fitfunction()
return
def _plot_footprints(image,footprints):
"""
    NOTE: This is for development. Not for
    public release.
    Plot the footprint edges over the image using
    each edge's fitting function.
"""
try:
from stsci.numdisplay import display
except ImportError:
from numdisplay import display
orientation = footprints[0].edges[0].orientation
pl.clf()
med = np.median(np.where(image>0,image,0))
for k,footprint in enumerate(footprints):
edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
xx1,yy1 = np.asarray(edge1.trace,int)
xx2,yy2 = np.asarray(edge2.trace,int)
# Plot (x_array,y_array)
evf1 = edge1.evalfunction
evf2 = edge2.evalfunction
if orientation == 90:
xx1 = np.asarray(evf1(yy1),int)
xx2 = np.asarray(evf2(yy2),int)
pl.plot(xx1,yy1,'b',xx2,yy2,'r')
else:
yy1 = np.asarray(evf1(xx1),int)
yy2 = np.asarray(evf2(xx2),int)
pl.plot(xx1,yy1,'b',xx2,yy2,'r')
image[yy1,xx1]=med*2
image[yy2,xx2]=med*2
display(image,frame=2,quiet=True)
def _fit_edges(edges):
"""
Fit a function to each edge.
Input:
        edges: List of Edge objects with (x,y) traces
    Output:
        Each input Edge is updated in place with its fit coefficients.
"""
for edge in edges:
# Use default edge.function, edge.order
edge.fitfunction()
class Footprint(object):
"""
Provides facilities to create a footprint each containing a pair
of edge traces.
::
Footprint attributes:
id: Integer reference number for footprint
region: Section of the image where the footprint solution is valid,
(x1, x2, y1, y2)
edges: Tuple of Edge object (edge_1,edge_2) defining the
long edges of the footprint.
width: Average width of the footprint.
"""
def __init__(self, id, edge_1,edge_2):
self.id = id
self.edges = (edge_1,edge_2)
x1 = np.min(edge_1.xlim+edge_2.xlim)
x2 = np.max(edge_1.xlim+edge_2.xlim)
y1 = np.min(edge_1.ylim+edge_2.ylim)
y2 = np.max(edge_1.ylim+edge_2.ylim)
if edge_1.orientation == 90:
# Evaluate x2 with the fitting function using the
# largest of y2 from both edges.
x2 = edge_2.evalfunction(y2)[0]
self.width = edge_2.evalfunction(y1)-edge_1.evalfunction(y1)
else:
# Evaluate y2 with the fitting function using the
# largest of x2 from both edges.
y2 = edge_2.evalfunction(x2)[0]
self.width = edge_2.evalfunction(x1)-edge_1.evalfunction(x1)
self.region = (x1, x2, y1, y2)
class FootprintTrace(object):
"""
FootprintTrace provides facilities to create a BINTABLE
extension with the input footprint list of objects.
Attributes:
footprints: Footprint object list.
Methods:
as_bintable: Creates BINTABLE
"""
def __init__(self,footprints):
self.footprints = footprints
def as_bintable(self):
"""
Creates a BINTABLE object from the
FootprintTrace object.
Input:
self.footprints: list of Footprint objects.
Output:
AD: HDU astrodata object with a TRACEFP bintable extension.
        **Column description**
::
'id' : integer reference number for footprint.
'region' : (x1,x2,y1,y2), window of pixel co-ords enclosing this
footprint. The origin of these coordinates could be
the lower left of the original image.
'range1' : (x1,x2,y1,y2), range where edge_1 is valid.
The origin of these coordinates is the lower left of the
original image.
'function1': Fit function name (default: polynomial) fitting edge_1.
            'coeff1' : Array of coefficients, high to low order, such that
                 pol(x) = c1*x**2 + c2*x + c3 (for order 2).
            'order1' : Order of polynomial (default: 2).
'range2' : ditto for edge_2.
'function2': ditto for edges_2
'coeff2' : ditto for edges_2
'order2' : ditto for edges_2
'cutrange1' : (x1,x2,y1,y2), range where edge_1 is valid.
The origin of these coordinates is the lower left
of the cutout region.
'cutfunction1': Fit function name (default: polynomial).
            'cutcoeff1' : Array of coefficients, high to low order, such that
                 pol(x) = c1*x**2 + c2*x + c3 (for order 2)
            'cutorder1' : Order of polynomial (default: 2).
'cutrange2' : ditto for edge_2
'cutfunction2': ditto for edge_2
'cutcoeff2' : ditto for edge_2
'cutorder2' : ditto for edge_2
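        Example (a sketch mirroring the calls in trace_footprints() above)::
            ft = FootprintTrace(footprints)
            tb_ad = ft.as_bintable()
            ad.append(tb_ad)    # appended as extension 'TRACEFP'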
"""
footprints = self.footprints
        # Get n_coeff. We are assuming it is the same for all edges.
n_coeff = len(footprints[0].edges[0].coefficients)
c1 = pf.Column (name='id',format='J')
c2 = pf.Column (name='region',format='4E')
c3 = pf.Column (name='range1',format='4E')
c4 = pf.Column (name='function1',format='15A')
c5 = pf.Column (name='order1',format='J')
c6 = pf.Column (name='coeff1',format='%dE'%n_coeff)
c7 = pf.Column (name='range2',format='4E')
c8 = pf.Column (name='function2',format='15A')
c9 = pf.Column (name='order2',format='J')
c10 = pf.Column (name='coeff2',format='%dE'%n_coeff)
c11 = pf.Column (name='cutrange1',format='4E')
c12 = pf.Column (name='cutfunction1',format='15A')
c13 = pf.Column (name='cutorder1',format='J')
c14 = pf.Column (name='cutcoeff1',format='%dE'%n_coeff)
c15 = pf.Column (name='cutrange2',format='4E')
c16 = pf.Column (name='cutfunction2',format='15A')
c17 = pf.Column (name='cutorder2',format='J')
c18 = pf.Column (name='cutcoeff2',format='%dE'%n_coeff)
nrows = len(footprints)
tbhdu = pf.new_table(pf.ColDefs([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,\
c11,c12,c13,c14,c15,c16,c17,c18]),nrows=nrows)
tb = tbhdu # an alias
# Write data to table columns
orientation = footprints[0].edges[0].orientation
for k,footprint in enumerate(footprints):
edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
tb.data.field('id')[k] = footprint.id
tb.data.field('region')[k] = np.asarray(footprint.region)
# EDGE_1 DATA with respect to original image co-ords
range1 = np.asarray(edge1.xlim+edge1.ylim) # (x1, x2, y1, y2)
tb.data.field('range1')[k] = range1
tb.data.field('function1')[k] = edge1.function
tb.data.field('order1')[k] = edge1.order
tb.data.field('coeff1')[k] = edge1.coefficients
# EDGE_2 DATA with respect to original image co-ords
range2 = np.asarray(edge2.xlim+edge2.ylim) # (x1, x2, y1, y2)
tb.data.field('range2')[k] = range2
tb.data.field('function2')[k] = edge2.function
tb.data.field('order2')[k] = edge2.order
tb.data.field('coeff2')[k] = edge2.coefficients
region_x1 = footprint.region[0]
region_y1 = footprint.region[2]
# Set up the coefficients of the edge fit functions. We are
# shifting the origin, so refit.
lcoeff=[]
zval=[]
for xx,yy in [edge1.trace,edge2.trace]:
# We need to refit inside the cut region; edge1's function and
# order are used for both edges (assumed identical, per the note above).
xmr = xx - region_x1
ymr = yy - region_y1
if orientation == 0:
z = gfit.Gfit(xmr,ymr,edge1.function,edge1.order)
else:
z = gfit.Gfit(ymr,xmr,edge1.function,edge1.order)
lcoeff.append(z.coeff)
zval.append(z)
xlim1 = np.asarray(edge1.xlim)
ylim1 = np.asarray(edge1.ylim)
xlim2 = np.asarray(edge2.xlim)
ylim2 = np.asarray(edge2.ylim)
# Get the maximum values from both edges, so we can zero
# the areas outside the footprint when cutting.
#
if orientation == 0:
# Choose the largest x between both edges.
xmax = max(xlim1[1],xlim2[1])
xlim1[1] = xmax
xlim2[1] = xmax
x1,x2 = (min(0,xlim1[0]),xmax)
# And reevaluate the y values at this xmax
y1 = ylim1[0] - region_y1
y2 = zval[1](xmax)[0]
else:
# Choose the largest y between both edges
ymax = max(ylim1[1],ylim2[1])
ylim1[1] = ymax
ylim2[1] = ymax
y1,y2 = (min(0,ylim1[0]),ymax)
# And reevaluate the x values at this ymax
x1 = xlim1[0] - region_x1
x2 = zval[1](ymax)[0]
# --- Set edge_1 data with respect to cutout image co-ords.
tb.data.field('cutrange1')[k] = (x1,x2,y1,y2)
tb.data.field('cutfunction1')[k] = edge1.function
tb.data.field('cutorder1')[k] = edge1.order
tb.data.field('cutcoeff1')[k] = lcoeff[0]
# --- Set edge_2 data with respect to cutout image co-ords
# Applied offsets to range2 from footprint.region(x1,y1)
tb.data.field('cutrange2')[k] = (x1,x2,y1,y2)
tb.data.field('cutfunction2')[k] = edge2.function
tb.data.field('cutorder2')[k] = edge2.order
tb.data.field('cutcoeff2')[k] = lcoeff[1]
# Add comment to TTYPE card
hdr = tb.header
if new_pyfits_version:
hdr.update = hdr.set
hdr.update('TTYPE2',hdr['TTYPE2'],
comment='(x1,y1,x2,y2): footprint window of pixel co-ords.')
hdr.update('TTYPE3',hdr['TTYPE3'], comment='type of fitting function.')
hdr.update('TTYPE4',hdr['TTYPE4'], comment='Number of coefficients.')
hdr.update('TTYPE5',hdr['TTYPE5'],
comment='Coeff array: c[0]*x**3 + c[1]*x**2+ c[2]*x+c[3]')
hdr.update('TTYPE6',hdr['TTYPE6'],
comment='(x1,y1,x2,y2): Edge fit window definition.')
tb.header = hdr
# Create an AD object with this
tabad = AstroData(tbhdu)
tabad.rename_ext("TRACEFP", 1)
return tabad
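# Illustrative usage sketch (not part of the original module): reading the
# TRACEFP records back out of the AD object returned by as_bintable().
# 'footprints' is assumed to be a list of Footprint objects.
#
# tabad = FootprintTrace(footprints).as_bintable()
# for rec in tabad['TRACEFP'].data:
#     print rec.field('id'), rec.field('region'), rec.field('order1')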
def _plot_footprints(self):
"""
NOTE: This is for development only; not for public release.
Plot the footprint edges using the edge fitting functions
(edge_1 in blue, edge_2 in red for each footprint).
"""
footprints = self.footprints
orientation = footprints[0].edges[0].orientation
for k,footprint in enumerate(footprints):
edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
xx1,yy1 = edge1.trace
xx2,yy2 = edge2.trace
evf1 = edge1.evalfunction
evf2 = edge2.evalfunction
if orientation == 0:
pl.plot(xx1,evf1(xx1),'b',xx2,evf2(xx2),'r')
else:
pl.plot(evf1(yy1),yy1,'b',evf2(yy2),yy2,'r')
import pywcs
def cut_footprints(ad, debug=False):
""" Creates individual footprint images from the information in the 'TRACEFP'
extension in the input AD object.
It returns an AD object with a list of IMAGE extensions; each
one containing a footprint cut.
INPUT
:param ad: AstroData object with a 'TRACEFP' bintable extension.
:type ad: AstroData
:param debug: Display and plot. If True it will display on DS9 the cuts
performed with the enclosed footprint. The footprints
will have the edges highlighted to show
how good the tracing was. It also shows
the cut and the edges on a plot.
OUTPUT
:return adout: AD object with a list of IMAGE extensions containing
one footprint per cut as described in the TRACEFP bintable
extension of the input AD object.
"""
try:
# Instantiate a CutFootprints object
cl = CutFootprints(ad,debug)
# Cut rectangular regions containing a footprint with its DQ and VAR
# regions, if any. The adout contains as many image extensions as there
# are entries in the TRACEFP table.
cl.cut_regions()
adout = cl.as_astrodata()
except Exception:
raise SystemError(repr(sys.exc_info()[1]))
return adout
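# Minimal usage sketch (illustrative; the file name is hypothetical, and the
# AD object must already carry a TRACEFP extension from trace_footprints):
#
# ad = AstroData('mos_flat.fits')
# ad = trace_footprints(ad)
# adout = cut_footprints(ad, debug=False)
# print adout.info()   # one SCI (plus DQ/VAR) extension per footprint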
class CutFootprints():
"""
CutFootprints provides facilities to build a list of footprint
sections from the input TRACEFP table in the Astrodata object.
::
Methods
-------
cut_regions
Loop through the records of the TRACEFP table, creating
one tuple per iteration containing the region and the
sci, dq and var cut-out pixel data.
cut_out:
Cut a region enclosing a footprint.
_init_as_astrodata
Initialize parameters to be used by as_astrodata.
as_astrodata
Form an hdu and each cutout and append it to adout.
Members
-------
ad: AD object containing the extension 'TRACEFP'
created by the ULF 'trace_footprint'.
has_var,has_dq:
Booleans to tell whether the VAR or DQ image sections
are in the input AD object.
orientation:
Int. Zero value for F2 and 90 for GMOS or GNIRS.
filename,instrument:
Used for output information when debug is True.
science,var,dq:
Reference to the input SCI, VAR and DQ image data.
cut_list:
List with the tuples. Each contains region, sci_cut,
var_cut and dq_cut footprint cut pixel data.
wcs: Input header WCS from pywcs.
debug:Display and plot. If True it will display on DS9 the cuts
performed with the enclosed footprint. The footprints
will have the edges highlighted to show
how good the tracing was. It also shows
the cut and the edges on a plot.
"""
def __init__(self,ad,debug=False):
self.ad = ad
self.has_dq = ad['DQ',1] is not None
self.has_var = ad['VAR',1] is not None
self.debug = debug
instrument = ad.instrument()
if instrument == 'F2':
self.orientation = 90
else:
if ad['SCI',1].dispersion_axis() == 1:
self.orientation = 0
else:
self.orientation = 90
if debug:
self.filename = ad.filename
self.instrument = instrument
self.dq = None
self.var = None
self.science = self.ad['SCI',1].data
if self.has_dq:
self.dq = self.ad['DQ', 1].data
if self.has_var:
self.var = self.ad['VAR',1].data
self.cut_list = [] # Contains the list of Cut objects
self.nregions = None # The number of records in TRACEFP
def cut_regions(self):
"""
Loop through the records of the TRACEFP table creating
one tuple per iteration with region,sci,dq,and var sections.
Then it appends each tuple to a list of cuts.
"""
table = self.ad['TRACEFP'].data
self.nregions = len(table.field('id'))
if self.debug:
plot_footprint(self.ad)
for rec in table:
cut_data = self.cut_out(rec)
self.cut_list.append(cut_data)
def cut_out(self,rec):
"""
Cut a region enclosing a footprint. Each cut is defined by 'region'
and the footprint in it is defined by the edges fitting functions.
The science section is zeroed out between the rectangle borders
and the footprint edge. The DQ section is bitwise ORed with 1.
::
Input:
rec: TRACEFP record
Output:
Tuple with region, sci_cut, dq_cut, var_cut data
"""
# Input frames to get cut outs from
science = self.science
var = self.var
dq = self.dq
t1=time.time()
rec_id = rec.field('id')  # footprint id (renamed: 'id' shadows a builtin)
region = rec.field('region')
cutrange1 = rec.field('cutrange1')
cutrange2 = rec.field('cutrange2')
evf1 = set_evalfunction(rec.field('cutcoeff1'))
evf2 = set_evalfunction(rec.field('cutcoeff2'))
# This is the rectangle coordinates containing one footprint
rx1,rx2,ry1,ry2 = region
# Get data sections. We add 1 to get the last element of the range.
sci_cut = science[ry1:ry2+1,rx1:rx2+1].copy()
# Define empty DQ, VAR cut array elements, in case the AD instance does
# not have them.
dq_cut,var_cut=(None,None)
has_dq = self.has_dq
has_var = self.has_var
if has_dq:
dq_cut = dq[ry1:ry2+1,rx1:rx2+1].copy()
if has_var:
var_cut = var[ry1:ry2+1,rx1:rx2+1].copy()
# Now clear (zero out) the area of the data and dq cuts
# between the rectangle and the footprint edges.
# Build index grids matching the shape of the cut array.
y,x=np.indices(sci_cut.shape)
# Generate mask values for the whole rectangular cut
# except for the footprint.
if self.orientation == 90:
# Mask values between indices of the cut left side
# and the indices of the left edge.
mask_1 = x < evf1(y)
# Mask values between the right edge indices and
# the indices of the cut right side.
mask_2 = evf2(y) < x
mask = mask_1 + mask_2
else:
# bottom
mask_1 = y < evf1(x)
# top
mask_2 = evf2(x) < y
mask = mask_1 + mask_2
sci_cut[mask] = 0
if has_var: var_cut[mask] = 0
if has_dq: dq_cut[mask] = np.bitwise_or(dq_cut[mask],1)
if self.debug:
# plot and display
plot_footprint_cut(sci_cut,x,y,self.orientation,evf1,evf2,
region, self.filename,self.instrument)
return (region, sci_cut, var_cut, dq_cut)
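# The masking trick above, shown in isolation (a self-contained numpy
# sketch, not part of the original module). For a 5x5 cut with straight
# horizontal edges y=1 and y=3 (orientation 0), everything below edge_1
# and above edge_2 is cleared, leaving only the footprint rows 1..3:
#
# import numpy as np
# sci = np.ones((5, 5))
# y, x = np.indices(sci.shape)
# evf1 = lambda x: 1.0 + 0.0*x      # bottom edge
# evf2 = lambda x: 3.0 + 0.0*x      # top edge
# mask = (y < evf1(x)) | (evf2(x) < y)
# sci[mask] = 0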
def _init_as_astrodata(self):
"""
Initialize parameters to be used by as_astrodata.
Creates a WCS object (pywcs) from the SCI header and
form the output AD object with the PHU and MDF from
the input AD. We are adding the TRACEFP extension as well
for later use on the spectral reduction process.
Input:
self.ad: AD object.
Output:
adout: Output AD object with AD phu and MDF
"""
ad = self.ad
# Start output AD with the original phu and the MDF extension.
adout = AstroData(phu=ad.phu)
adout.append(ad['MDF'])
adout.append(ad['TRACEFP'])
# Get wcs information. It is in the PHU
try:
self.wcs = pywcs.WCS(ad.phu.header)
if not hasattr(self.wcs.wcs, 'cd'):
self.wcs = None
except: # Something wrong with WCS, set it to None
self.wcs = None
return adout
def as_astrodata(self):
"""
With each cut object in the cut_list having the SCI,DQ,VAR set,
form an hdu and append it to adout. Update keywords EXTNAME= 'SCI',
EXTVER=<footprint#>, CCDSEC, DISPAXIS, CUTSECT, CUTORDER in the header
and reset WCS information if there was a WCS in the input AD header.
::
Input:
self.cut_list: List of Cut objects.
self.adout: Output AD object with MDF and
TRACEFP extensions.
Output:
adout: contains the appended HDUs.
"""
adout = self._init_as_astrodata()
ad = self.ad
scihdr = ad['SCI',1].header.copy()
if self.has_dq:
dqheader = ad['DQ', 1].header.copy()
if self.has_var:
varheader = ad['VAR',1].header.copy()
# Update NSCIEXT keyword to represent the current number of cuts.
if new_pyfits_version:
adout.phu.header.update = adout.phu.header.set
adout.phu.header.update('NSCIEXT',len(self.cut_list))
# This is a function renaming when using Pyfits 3.1
if new_pyfits_version:
scihdr.update = scihdr.set
extver = 1
# Generate the cuts using the region's sci_cut,var_cut and
# dq_cut
for region,sci_cut,var_cut,dq_cut in self.cut_list:
rx1,rx2,ry1,ry2 = np.asarray(region) + 1 # To 1-based
csec = '[%d:%d,%d:%d]'%(rx1,rx2,ry1,ry2)
scihdr.update('NSCUTSEC',csec,
comment="Region extracted by 'cut_footprints'")
scihdr.update('NSCUTSPC',extver,comment="Spectral order")
form_extn_wcs(scihdr, self.wcs, region)
new_sci_ext = AstroData(data=sci_cut,header=scihdr)
new_sci_ext.rename_ext(name='SCI',ver=extver)
adout.append(new_sci_ext)
if self.has_dq:
new_dq_ext = AstroData(data=dq_cut, header=dqheader)
new_dq_ext.rename_ext(name='DQ',ver=extver)
adout.append(new_dq_ext)
if self.has_var:
new_var_ext = AstroData(data=var_cut, header=varheader)
new_var_ext.rename_ext(name='VAR',ver=extver)
adout.append(new_var_ext)
extver += 1
return adout
def set_evalfunction(coeff):
"""
Utility function to form a polynomial given
a coefficients array.
Input:
coeff: coefficients array, no greater than
4 elements.
Output:
eval: Evaluator function
"""
terms = ['','*x', '*x**2', '*x**3','*x**4']
func = 'lambda x:'
cc = coeff[::-1] # The coefficients are from high order to low
for i in range(len(cc)):
func = func + '+%g'%cc[i]+terms[i]
evf = eval(func)
return evf
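# Worked example (illustrative): coefficients are given high to low, so
# set_evalfunction([2., 3., 1.]) builds f(x) = 2*x**2 + 3*x + 1 and
# f(2.0) returns 15.0 (2*4 + 3*2 + 1).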
def form_extn_wcs(scihdr, wcs, region):
"""
Form wcs information for this cut and
update the header. The original WCS information
is used to calculate CRVAL1,2 of the center
of the cut. The CD matrix is unchanged.
We used PYWCS module for the transformations.
Input:
scihdr: SCI header from the original FITS WCS
wcs: WCS instance of pywcs
region: coords of the cut.
"""
if wcs is None: return # Don't do anything if wcs is bad
kwlist = ['equinox','ctype1','cunit1','crpix1','crval1','ctype2','cunit2',
'crpix2','crval2']
# Get the WCS keywords from the PHU
pheader = wcs.to_header()
rx1,rx2,ry1,ry2 = np.asarray(region) + 1 # To 1-based
# Calculate crpix,crval for this section middle point
cpix1 = (rx2-rx1+1)/2.0+rx1
cpix2 = (ry2-ry1+1)/2.0+ry1
(pheader['crval1'],pheader['crval2']), = wcs.wcs_pix2sky([[cpix1,cpix2]],1)
pheader['crpix1'] = cpix1-rx1+1 # Set origin of the section to (1,1)
pheader['crpix2'] = cpix2-ry1+1
if new_pyfits_version:
scihdr.update = scihdr.set
# Now form the WCS header
for kw in kwlist:
scihdr.update(kw,pheader[kw])
# Now the CD matrix. PC is the pywcs name for the CD keywords.
scihdr.update('cd1_1',pheader['pc1_1'])
scihdr.update('cd1_2',pheader['pc1_2'])
scihdr.update('cd2_1',pheader['pc2_1'])
scihdr.update('cd2_2',pheader['pc2_2'])
return
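# Worked example of the CRPIX arithmetic above (illustrative numbers):
# for region = (100, 199, 50, 149) (0-based), rx1 becomes 101 after the
# +1 shift and the section middle is cpix1 = (200-101+1)/2.0 + 101 = 151.0
# in the parent frame; CRPIX1 = 151.0 - 101 + 1 = 51.0, i.e. the same
# pixel expressed in the cutout's own 1-based system.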
def plot_footprint(ad):
""" Plot and display the edges found by trace_footprints.
This information is the TRACEFP bintable
extension in the AD object.
"""
try:
from stsci.numdisplay import display
except ImportError:
from numdisplay import display
if isinstance(ad, list): ad = ad[0]
if ad.instrument() == 'F2':
orientation = 90
else:
if ad['SCI',1].dispersion_axis() == 1:
orientation = 0
else:
orientation = 90
pl.clf()
tb = ad['TRACEFP'].data
data = ad['SCI',1].data
for rec in tb:
region = rec.field('region')
cutrange1 = rec.field('cutrange1')
cutrange2 = rec.field('cutrange2')
coeff1 = rec.field('cutcoeff1')
coeff2 = rec.field('cutcoeff2')
evf1 = set_evalfunction(coeff1)
evf2 = set_evalfunction(coeff2)
if orientation == 0:
x1,x2,y1,y2 = region
x = np.arange(x1,x2+1)
pl.plot(x,evf1(x)+y1,'b') # edge_1
pl.plot(x,evf2(x)+y1,'r') # edge_2
for x in np.arange(int(x1),int(x2+1.),20):
xi = slice(x,x+5)
xr = np.arange(x,x+5)
yi = list(evf1(xr)+y1) + list(evf2(xr)+y1)
data[yi,xi] = 9999
else: # Orientation is 90
x1,x2,y1,y2 = region
y = np.arange(y1,y2+1)
pl.plot(evf1(y)+x1,y,'b') # edge 1
pl.plot(evf2(y)+x1,y,'r') # edge 2
off = evf1(y1)
for y in range(int(y1),int(y2+1.),20):
yi = slice(y,y+5)
xi = list(evf1(np.arange(y,y+5))+x1-off) + \
list(evf2(np.arange(y,y+5))+x1-off)
data[yi,xi] = 99999
fname = os.path.basename(ad.filename)
pl.title(fname+' ('+ad.instrument()+')')
display(data,frame=1,quiet=True)
time.sleep(3)
def plot_footprint_cut(data,x,y,orientation,evf1,evf2,region,filename,instru):
"""
Plot the footprint cut inside a rectangle then
display the same cut on frame 2.
"""
try:
from stsci.numdisplay import display
except ImportError:
from numdisplay import display
pl.clf()
bval = data.max()
bval += bval/10.
#rx1,rx2,ry1,ry2 = region
ny,nx = data.shape
rxmin,rymin=(nx,ny)
rxmax,rymax=(0,0)
if True: # always true; kept to preserve the original block indentation
if orientation == 0:
x = np.arange(nx)
rxmin,rxmax = (0,nx)
for evf,color in zip([evf1,evf2],['b','r']):
zy = evf(x)
pl.plot(x,zy,color)
imin = np.argmin(zy)
imax = np.argmax(zy)
rymin = min(rymin,zy[imin])
rymax = max(rymax,zy[imax])
pl.fill([rxmin,rxmax,rxmax,rxmin], [rymin,rymin,rymax,rymax],fill=False)
else: # Orientation is 90
y = np.arange(ny)
rymin,rymax = (0,ny)
for evf,color in zip([evf1,evf2],['b','r']):
zx = evf(y)
pl.plot(zx,y,color)
imin = np.argmin(zx)
imax = np.argmax(zx)
rxmin = min(rxmin,zx[imin])
rxmax = max(rxmax,zx[imax])
pl.fill([rxmin,rxmax,rxmax,rxmin], [rymin,rymin,rymax,rymax],fill=False)
fname = os.path.basename(filename)
pl.title(fname+' ('+instru+')')
#pl.xlabel(str(region))
pl.draw()
display(data,frame=2,z1=0,z2=bval,quiet=True)
time.sleep(1)
if __name__ == '__main__':
""" Testing in the unix shell
"""
from astrodata import AstroData
import time
f2='/data2/ed/data/fS20120104S0070.fits'
gnirs = '/data2/ed/data/nN20101215S0475_comb.fits'
gmos = '/data2/ed/data/mgS20100113S0110.fits'
t1=time.time()
for ff in [gmos,f2,gnirs]:
ad = AstroData(ff)
print 'MAIN:>>>>>>>>>>>>>>>>>',ad.instrument(),ad.filename
adout = trace_footprints(ad,debug=False)
print adout.info()
t2 = time.time()
print '.....trace_footprints:','(%.2f curr: %.1f)'%(t2-t1,t2-t1)
cl = CutFootprints(adout,debug=False)
t4=time.time()
cl.cut_regions()
t5=time.time()
print '.....cut_regions:','(%.2f curr: %.1f)'%(t5-t4,t5-t1)
adcut=cl.as_astrodata()
t6=time.time()
print '...cl.as_astrodata:','(%.2f curr: %.1f)'%(t6-t5,t6-t1)
#adcut.filename='adcut.fits'
#adcut.write(clobber=True)
#raw_input('Enter...to continue')
"""
k=1
for ad in adcut:
ad.filename='adlist'+str(k)+'.fits'
print '...Writing', ad.filename
ad.write(clobber=True)
k=k+1
print 'Total time:',time.time()-t1
"""
#nN20120305S0080_comb.fits # GNIRS 4 files comb (.15 pixscale) [4 secs] (new:3.5)
#fS20120104S0070.fits # F2 MOS [21 secs] (new: 8 sec)
#gS20100113S0110.fits # prepare gmos MOS flat
#mgS20100113S0110.fits # Mosaic GMOS [58 secs] (new: 12 sec)
| mpl-2.0 |
aarshayj/easyML | easyML/models_classification.py | 1 | 59771 | #####################################################################
##### IMPORT STANDARD MODULES
#####################################################################
#Python 3 support:
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import pydot
import os
from scipy.stats.mstats import chisquare, mode
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import metrics, model_selection
from sklearn.feature_selection import RFE, RFECV
from abc import ABCMeta, abstractmethod
# from StringIO import StringIO
# import xgboost as xgb
# from xgboost.sklearn import XGBClassifier
from .genericmodelclass import GenericModelClass
from .data import DataBlock
#####################################################################
##### GENERIC MODEL CLASS
#####################################################################
class base_classification(GenericModelClass):
""" A base class which defines the generic classification functions
and variable definitions.
Parameters
----------
alg : object
An sklearn-style estimator
data_block : object
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
#Define as a meta class to disable direct instances
__metaclass__ = ABCMeta
# Map possible inputs to functions in sklean.metrics.
# Each value of the dictionary is a tuple of 3:
# (function, multi-class support, requires-probabilities)
# function: the sklearn metrics function
# multi-class support: if True, function allows multi-class support
# requires-probabilities: if True, the function requires
# probabilities to be passed as arguments
metrics_map = {
'accuracy':(metrics.accuracy_score,True,False),
'auc':(metrics.roc_auc_score,False,True),
'log_loss':(metrics.log_loss,True,True),
'f1':(metrics.f1_score,True,False),
'average_precision':(metrics.average_precision_score,False,True)
}
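#Illustrative read of this map (not part of the original code): for 'auc'
#the stored tuple is (metrics.roc_auc_score, False, True), i.e. the sklearn
#scoring function itself, no multi-class support, and probabilities rather
#than class labels must be passed to it:
# func, multiclass_ok, needs_proba = base_classification.metrics_map['auc']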
def __init__(
self, alg, data_block, predictors=[],cv_folds=5,
scoring_metric='accuracy',additional_display_metrics=[]
):
GenericModelClass.__init__(
self, alg=alg, data_block=data_block, predictors=predictors,
cv_folds=cv_folds,scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics)
#Run input datatype checks:
self.check_datatype(data_block,'data_block',DataBlock)
self.subset_check(predictors)
self.check_datatype(cv_folds,'cv_folds',int)
self.check_datatype(scoring_metric,'scoring_metric',str) #basestring is undefined under Python 3
self.check_datatype(
additional_display_metrics,'additional_display_metrics',list)
#Store predicted probabilities in a dictionary with keys as the
# name of the dataset (train/test/predict) and values as the actual
# predictions.
self.predictions_probabilities = {}
#Boolean to store whether the estimator chosen allows probability
# predictions
self.probabilities_available = True
#Define number of classes in target.
self.num_target_class = len(
self.datablock.train[self.datablock.target].unique())
#A Series object to store generic classification model outcomes.
self.classification_output=pd.Series(
index = ['ModelID','CVScore_mean','CVScore_std','AUC',
'ActualScore (manual entry)','CVMethod','Predictors']
)
#Get the dictionary of available dataframes
self.dp = self.datablock.data_present()
#Check all the entered metrics. Note that this check has to be
#placed after declaration of num_target_class attribute
for metric in [scoring_metric]+additional_display_metrics:
self.check_metric(metric,self.num_target_class)
@classmethod
def check_metric(cls,metric,num_target_class):
if metric not in cls.metrics_map:
raise self.InvalidInput("The input '%s' is not a valid scoring metric for this module"%metric)
if num_target_class>2:
if not cls.metrics_map[metric][1]:
raise self.InvalidInput("The %s metric does not support multi-class classification case"%metric)
def fit_model(
self, performCV=True, printResults=True,
printTopN=None, printConfusionMatrix=True,
printModelParameters=True):
"""An advanced model fit function which fits the model on the
training data and performs cross-validation. It prints a model
report containing the following:
- The parameters being used to fit the model
- Confusion matrix for the train and test data
- Scoring metrics for the train and test data
- CV mean and std scores for scoring metric
- Additional scoring metrics on train and test data, if specified
Note that you can decide which details are to be printed using method
arguments.
Parameters
----------
performCV : bool, default True
if True, the model performs cross-validation using the number of
folds as the cv_folds parameter of the model
printResults : bool, default True
if True, prints the report of the model. This should be kept as
True unless the module is being used in a background script
printTopN : int, default None
The number of top scored features to be displayed in the feature
importance or coefficient plot of the model. If None, all the
features will be displayed by default. Note:
- For algorithms supporting real coefficients, the features will
be sorted by their magnitudes (absolute values).
- For algorithms supporting positive feature importance scores,
features are sorted on the score itself.
This will be ignored if printResults is False.
printConfusionMatrix : bool, default True
if True, the confusion matrix for the train and test dataframes
are printed, otherwise they are omitted.
This will be ignored if printResults is False.
printModelParameters : bool, default True
if True, the parameters being used to run the model are
printed. It helps in validating the parameters and also makes
jupyter notebooks more informative if used
"""
self.check_datatype(performCV,'performCV',bool)
self.check_datatype(printResults,'printResults',bool)
self.check_datatype(printConfusionMatrix,'printConfusionMatrix',bool)
self.check_datatype(printModelParameters,'printModelParameters',bool)
if printTopN:
self.check_datatype(printTopN,'printTopN',int)
self.alg.fit(
self.datablock.train[self.predictors],
self.datablock.train[self.datablock.target])
#Get algo_specific_values
self.algo_specific_fit(printTopN)
#Get predictions:
for key,data in self.dp.items():
self.predictions_class[key] = self.alg.predict(
data[self.predictors])
if self.probabilities_available:
for key,data in self.dp.items():
self.predictions_probabilities[key] = self.alg.predict_proba(
data[self.predictors])
self.calc_model_characteristics(performCV)
if printResults:
self.printReport(printConfusionMatrix, printModelParameters)
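#Illustrative call (assuming 'model' is an instance of any concrete
#subclass, e.g. logistic_regression, with predictors already set):
# model.fit_model(performCV=True, printTopN=10, printConfusionMatrix=False)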
def calc_model_characteristics(self, performCV=True):
# Determine key metrics to analyze the classification model. These
# are stored in the classification_output Series object belonging to
# this class.
for metric in [self.scoring_metric]+self.additional_display_metrics:
#Determine for both test and train, except predict:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
#Case where probabilities to be passed as arguments
if base_classification.metrics_map[metric][2]:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_probabilities[key])
#case where class predictions to be passed as arguments
else:
self.classification_output[name] = \
base_classification.metrics_map[metric][0](
data[self.datablock.target],
self.predictions_class[key])
#Determine confusion matrix:
name = 'ConfusionMatrix_%s'%key
self.classification_output[name] = pd.crosstab(
data[self.datablock.target],
self.predictions_class[key]
).to_string()
if performCV:
cv_score = self.KFold_CrossValidation(
scoring_metric=self.scoring_metric)
else:
cv_score = {
'mean_error': 0.0,
'std_error': 0.0
}
self.classification_output['CVMethod'] = \
'KFold - ' + str(self.cv_folds)
self.classification_output['CVScore_mean'] = cv_score['mean_error']
self.classification_output['CVScore_std'] = cv_score['std_error']
self.classification_output['Predictors'] = str(self.predictors)
def printReport(self, printConfusionMatrix, printModelParameters):
# Print the metric determined in the previous function.
print("\nModel Report")
#Output the parameters used for modeling
if printModelParameters:
print('\nModel being built with the following parameters:')
print(self.alg.get_params())
if printConfusionMatrix:
for key,data in self.dp.items():
if key!='predict':
print("\nConfusion Matrix for %s data:"%key)
print(pd.crosstab(
data[self.datablock.target],
self.predictions_class[key])
)
print('Note: rows - actual; col - predicted')
print("\nScoring Metric:")
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(self.scoring_metric,key)
print("\t%s (%s): %s" %
(
self.scoring_metric,
key,
"{0:.3%}".format(self.classification_output[name])
)
)
print("\nCV Score for Scoring Metric (%s):"%self.scoring_metric)
print("\tMean - %f | Std - %f" % (
self.classification_output['CVScore_mean'],
self.classification_output['CVScore_std'])
)
if self.additional_display_metrics:
print("\nAdditional Scoring Metrics:")
for metric in self.additional_display_metrics:
for key,data in self.dp.items():
if key!='predict':
name = '%s_%s'%(metric,key)
print("\t%s (%s): %s" % (
metric,
key,
"{0:.3%}".format(
self.classification_output[name])
)
)
def plot_feature_importance(self, printTopN):
num_print = len(self.feature_imp)
if printTopN is not None:
num_print = min(printTopN,len(self.feature_imp))
self.feature_imp.iloc[:num_print].plot(
kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show(block=False)
def plot_abs_coefficients(self,coeff,printTopN):
num_print = len(coeff)
if printTopN is not None:
num_print = min(printTopN,num_print)
coeff_abs_sorted = sorted(
coeff.index,
key=lambda x: abs(coeff[x]),
reverse=True
)
coeff[coeff_abs_sorted].iloc[:num_print,].plot(
kind='bar',
title='Feature Coefficients (Sorted by Magnitude)'
)
plt.ylabel('Magnitude of Coefficients')
plt.show(block=False)
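#The magnitude sort above, in isolation (a small pandas sketch with made-up
#numbers): sorting the index of a coefficient Series by absolute value.
# coeff = pd.Series({'a': -3.0, 'b': 1.0, 'c': 2.0})
# order = coeff.abs().sort_values(ascending=False).index # ['a', 'c', 'b']
# coeff[order].plot(kind='bar')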
def submission_proba(
self, IDcol, proba_colnames,filename="Submission.csv"):
"""
"""
submission = pd.DataFrame({
x: self.datablock.predict[x] for x in list(IDcol)
})
if len(list(proba_colnames))>1:
for i in range(len(proba_colnames)):
submission[proba_colnames[i]] = self.test_pred_prob[:,i]
else:
submission[list(proba_colnames)[0]] = self.test_pred_prob[:,1]
submission.to_csv(filename, index=False)
def set_parameters(self, param=None, cv_folds=None, set_default=False):
""" Set the parameters of the model. Only the parameters to be
updated are required to be passed.
Parameters
----------
param : dict, default None
A dictionary of key,value pairs where the keys are the parameters
to be updated and values as the new value of those parameters.
If None, no update is performed.
Ignored if set_default is True.
cv_folds : int, default None
Pass the number of CV folds to be used in the model.
If None, no update performed.
set_default : bool, default False
if True, the model will be set to default parameters as defined
in model definition by scikit-learn. Note that this will not
affect the cv_folds parameter.
"""
#Check input (param may be None, so only type-check when given)
if param is not None:
self.check_datatype(param,'param',dict)
self.check_datatype(set_default,'set_default',bool)
if param:
if not set(param.keys()).issubset(
set(self.default_parameters.keys())
):
raise self.InvalidInput("""The parameters passed should be a
subset of the model parameters""")
if set_default:
param = self.default_parameters
self.alg.set_params(**param)
self.model_output.update(pd.Series(param))
if cv_folds:
self.cv_folds = cv_folds
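#Usage sketch (illustrative): update only the parameters that change; the
#keys must be a subset of the estimator's default parameters.
# model.set_parameters(param={'C': 0.5, 'tol': 1e-3}, cv_folds=5)
# model.set_parameters(set_default=True) #reset params, cv_folds unchanged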
def export_model_base(self, IDcol, mstr):
self.create_ensemble_dir()
filename = os.path.join(os.getcwd(),'ensemble/%s_models.csv'%mstr)
comb_series = self.classification_output.append(
self.model_output,
verify_integrity=True)
if os.path.exists(filename):
models = pd.read_csv(filename)
mID = int(max(models['ModelID'])+1)
else:
mID = 1
models = pd.DataFrame(columns=comb_series.index)
comb_series['ModelID'] = mID
models = models.append(comb_series, ignore_index=True)
models.to_csv(filename, index=False, float_format="%.5f")
model_filename = os.path.join(
os.getcwd(),
'ensemble/%s_%s.csv'%(mstr,str(mID))
)
self.submission(IDcol, model_filename)
@abstractmethod
def algo_specific_fit(self,printTopN):
#Run algo-specific commands
pass
@abstractmethod
def export_model(self,IDcol):
#Export models
pass
#####################################################################
##### LOGISTIC REGRESSION
#####################################################################
class logistic_regression(base_classification):
""" Create a Logistic Regression model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'C':1.0,
'tol':0.0001,
'solver':'liblinear',
'multi_class':'ovr',
'class_weight':'balanced'
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=LogisticRegression(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output=pd.Series(self.default_parameters)
self.model_output['Coefficients'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
if self.num_target_class==2:
coeff = pd.Series(
np.concatenate(
(self.alg.intercept_,
self.alg.coef_[0])),
index=["Intercept"]+self.predictors
)
self.plot_abs_coefficients(coeff,printTopN)
else:
cols=['coef_class_%d'%i for i in range(0,self.num_target_class)]
coeff = pd.DataFrame(
self.alg.coef_.T,
columns=cols,
index=self.predictors
)
print('\nCoefficients:')
print(coeff)
self.model_output['Coefficients'] = coeff.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'logistic_reg')
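#End-to-end sketch for this class (hypothetical data and DataBlock
#arguments, assumed here for illustration only):
# dblock = DataBlock(train=train_df, test=test_df, predict=None,
# target='Outcome')
# lr = logistic_regression(dblock, predictors=['age', 'bmi'],
# scoring_metric='auc')
# lr.fit_model()
# lr.export_model(IDcol='ID') #requires a predict dataframe in the block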
#####################################################################
##### DECISION TREE
#####################################################################
class decision_tree(base_classification):
""" Create a Decision Tree model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':None,
'random_state':None,
'max_leaf_nodes':None,
'class_weight':'balanced'
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=DecisionTreeClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'decision_tree')
## UNDER DEVELOPMENT CODE FOR PRINTING TREES
# def get_tree(self):
# return self.alg.tree_
# Print the tree in visual format
# Inputs:
# export_pdf - if True, a pdf will be exported with the
# filename as specified in pdf_name argument
# pdf_name - name of the pdf file if export_pdf is True
# def printTree(self, export_pdf=True, file_name="Decision_Tree.pdf"):
# dot_data = StringIO()
# export_graphviz(
# self.alg, out_file=dot_data, feature_names=self.predictors,
# filled=True, rounded=True, special_characters=True)
# export_graphviz(
# self.alg, out_file='data.dot', feature_names=self.predictors,
# filled=True, rounded=True, special_characters=True
# )
# graph = pydot.graph_from_dot_data(dot_data.getvalue())
# if export_pdf:
# graph.write_pdf(file_name)
# return graph
#####################################################################
##### RANDOM FOREST
#####################################################################
class random_forest(base_classification):
""" Create a Random Forest model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':10,
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':'auto',
'max_leaf_nodes':None,
'oob_score':False,
'random_state':None,
'class_weight':'balanced',
'n_jobs':1
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=RandomForestClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
self.model_output['OOB_Score'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
if self.model_output['oob_score']:
print('OOB Score : %f' % self.alg.oob_score_)
self.model_output['OOB_Score'] = self.alg.oob_score_
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'random_forest')
#####################################################################
##### EXTRA TREES FOREST
#####################################################################
class extra_trees(base_classification):
""" Create an Extra Trees Forest model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':10,
'criterion':'gini',
'max_depth':None,
'min_samples_split':2,
'min_samples_leaf':1,
'max_features':'auto',
'max_leaf_nodes':None,
'oob_score':False,
'random_state':None,
'class_weight':'balanced',
'n_jobs':1
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=ExtraTreesClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
self.model_output['OOB_Score'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
if self.model_output['oob_score']:
print('OOB Score : %f' % self.alg.oob_score_)
self.model_output['OOB_Score'] = self.alg.oob_score_
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'extra_trees')
#####################################################################
##### ADABOOST CLASSIFICATION
#####################################################################
class adaboost(base_classification):
""" Create an AdaBoost model using implementation from
scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'n_estimators':50,
'learning_rate':1.0
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=AdaBoostClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
plt.xlabel("AdaBoost Estimator")
plt.ylabel("Estimator Error")
plt.plot(
range(1, int(self.model_output['n_estimators'])+1),
self.alg.estimator_errors_
)
plt.plot(
range(1, int(self.model_output['n_estimators'])+1),
self.alg.estimator_weights_
)
plt.legend(
['estimator_errors','estimator_weights'],
loc='upper left'
)
plt.show(block=False)
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'adaboost')
#####################################################################
##### GRADIENT BOOSTING MACHINE
#####################################################################
class gradient_boosting_machine(base_classification):
""" Create a GBM (Gradient Boosting Machine) model using implementation
from scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'loss':'deviance',
'learning_rate':0.1,
'n_estimators':100,
'subsample':1.0,
'min_samples_split':2,
'min_samples_leaf':1,
'max_depth':3, 'init':None,
'random_state':None,
'max_features':None,
'verbose':0,
'max_leaf_nodes':None,
'warm_start':False,
'presort':'auto'
}
def __init__(
self, data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=GradientBoostingClassifier(), data_block=data_block,
predictors=predictors,cv_folds=cv_folds,
scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output = pd.Series(self.default_parameters)
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
def algo_specific_fit(self, printTopN):
# print Feature Importance Scores table
self.feature_imp = pd.Series(
self.alg.feature_importances_,
index=self.predictors
).sort_values(ascending=False)
self.plot_feature_importance(printTopN)
self.model_output['Feature_Importance'] = \
self.feature_imp.to_string()
#Plot OOB estimates if subsample <1:
if self.model_output['subsample']<1:
plt.xlabel("GBM Iteration")
plt.ylabel("Score")
plt.plot(
range(1, self.model_output['n_estimators']+1),
self.alg.oob_improvement_
)
plt.legend(['oob_improvement_','train_score_'], loc='upper left')
plt.show(block=False)
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'gbm')
#####################################################################
##### Support Vector Classifier
#####################################################################
class linear_svm(base_classification):
""" Create a Linear Support Vector Machine model using implementation
from scikit-learn.
Parameters
----------
data_block : object of type easyML.DataBlock
An object of easyML's DataBlock class. You should first create an
object of that class and then pass it as a parameter.
predictors : list of strings, default []
A list of columns which are to be used as predictors (also called
independent variables or features).
The default value is an empty list because these need not always be
defined at the time of class initialization. The set_predictors
method can be used later but before creating any predictive model.
cv_folds : int, default 5
The number of folds to be created while performing CV.
This parameter can be adjusted later using the
set_parameters method.
scoring_metric : str, default 'accuracy'
The scoring metric to be used for evaluating the model across the
different functions available. The available options are
- 'accuracy'
- 'auc'
- 'log_loss'
- 'f1'
- 'average_precision'
additional_display_metrics : list of string, default []
A list of additional display metrics to be shown for the test and
train dataframes in data_block. Note:
- These will be just shown for user reference and not actually used
for model evaluation
- The same available options as scoring_metric apply
"""
default_parameters = {
'C':1.0,
'kernel':'linear', #modified not default
'degree':3,
'gamma':'auto',
'coef0':0.0,
'shrinking':True,
'probability':False,
'tol':0.001,
'cache_size':200,
'class_weight':None,
'verbose':False,
'max_iter':-1,
'decision_function_shape':None,
'random_state':None
}
def __init__(
self,data_block, predictors=[],cv_folds=10,
scoring_metric='accuracy',additional_display_metrics=[]):
base_classification.__init__(
self, alg=SVC(), data_block=data_block, predictors=predictors,
cv_folds=cv_folds,scoring_metric=scoring_metric,
additional_display_metrics=additional_display_metrics
)
self.model_output=pd.Series(self.default_parameters)
self.model_output['Coefficients'] = "-"
#Set parameters to default values:
self.set_parameters(set_default=True)
#Check if probabilities enables:
if not self.alg.get_params()['probability']:
self.probabilities_available = False
def algo_specific_fit(self, printTopN):
if self.num_target_class==2:
coeff = pd.Series(
np.concatenate((self.alg.intercept_,self.alg.coef_[0])),
index=["Intercept"]+self.predictors
)
#print the chart of importances
self.plot_abs_coefficients(coeff, printTopN)
else:
cols=['coef_class_%d'%i for i in range(0,self.num_target_class)]
coeff = pd.DataFrame(
self.alg.coef_.T,
columns=cols,
index=self.predictors
)
print('\nCoefficients:')
print(coeff)
self.model_output['Coefficients'] = coeff.to_string()
def export_model(self, IDcol):
#Export the model into the model file as well as create a submission
#with model index. This will be used for creating an ensemble.
self.export_model_base(IDcol,'linear_svm')
#####################################################################
##### XGBOOST ALGORITHM (UNDER DEVELOPMENT)
#####################################################################
"""
#Define the class similar to the overall classification class
class XGBoost(base_classification):
def __init__(self,data_block, predictors, cv_folds=5,scoring_metric_skl='accuracy', scoring_metric_xgb='error'):
base_classification.__init__(self, alg=XGBClassifier(), data_block=data_block, predictors=predictors,cv_folds=cv_folds,scoring_metric=scoring_metric_skl)
#Define default parameters on your own:
self.default_parameters = {
'max_depth':3, 'learning_rate':0.1,
'n_estimators':100, 'silent':True,
'objective':"binary:logistic",
'nthread':1, 'gamma':0, 'min_child_weight':1,
'max_delta_step':0, 'subsample':1, 'colsample_bytree':1, 'colsample_bylevel':1,
'reg_alpha':0, 'reg_lambda':1, 'scale_pos_weight':1,
'base_score':0.5, 'seed':0, 'missing':None
}
self.model_output = pd.Series(self.default_parameters)
#create DMatrix with nan as missing by default. If this is changed later, the matrices are re-calculated. If not set, an error is raised when nan is present in the data
self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=np.nan)
self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=np.nan)
self.num_class = 2
self.n_estimators = 10
self.eval_metric = 'error'
self.train_predictions = []
self.train_pred_prob = []
self.test_predictions = []
self.test_pred_prob = []
self.num_target_class = len(data_train[target].unique())
#define scoring metric:
self.scoring_metric_skl = scoring_metric_skl
# if scoring_metric_xgb=='f1':
# self.scoring_metric_xgb = self.xg_f1
# else:
self.scoring_metric_xgb = scoring_metric_xgb
#Define a Series object to store generic classification model outcomes;
self.classification_output=pd.Series(index=['ModelID','Accuracy','CVScore_mean','CVScore_std','SpecifiedMetric',
'ActualScore (manual entry)','CVMethod','ConfusionMatrix','Predictors'])
#feature importance (g_scores)
self.feature_imp = None
self.model_output['Feature_Importance'] = "-"
#Set parameters to default values:
# self.set_parameters(set_default=True)
#Define custom f1 score metric:
def xg_f1(self,y,t):
t = t.get_label()
y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y] # binarizing your output
return 'f1',metrics.f1_score(t,y_bin)
# Set the parameters of the model.
# Note:
# > only the parameters to be updated are required to be passed
# > if set_default is True, the passed parameters are ignored and default parameters are set which are defined in scikit learn module
def set_parameters(self, param=None, set_default=False):
if set_default:
param = self.default_parameters
self.alg.set_params(**param)
self.model_output.update(pd.Series(param))
if 'missing' in param:
#update DMatrix with missing:
self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=param['missing'])
self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=param['missing'])
if 'num_class' in param:
self.num_class = param['num_class']
if 'cv_folds' in param:
self.cv_folds = param['cv_folds']
# def set_feature_importance(self):
# fs = self.alg.booster().get_fscore()
# ftimp = pd.DataFrame({
# 'feature': fs.keys(),
# 'importance_Score': fs.values()
# })
# ftimp['predictor'] = ftimp['feature'].apply(lambda x: self.predictors[int(x[1:])])
# self.feature_imp = pd.Series(ftimp['importance_Score'].values, index=ftimp['predictor'].values)
#Fit the model using predictors and parameters specified before.
# Inputs:
# printCV - if True, CV is performed
def modelfit(self, performCV=True, useTrainCV=False, TrainCVFolds=5, early_stopping_rounds=20, show_progress=True, printTopN='all'):
if useTrainCV:
xgb_param = self.alg.get_xgb_params()
if self.num_class>2:
xgb_param['num_class']=self.num_class
if self.scoring_metric_xgb=='f1':
cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
metrics=['auc'],feval=self.xg_f1,early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
else:
cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
metrics=self.scoring_metric_xgb, early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
self.alg.set_params(n_estimators=cvresult.shape[0])
print(self.alg.get_params())
obj = self.alg.fit(self.datablock.train[self.predictors], self.datablock.train[self.datablock.target], eval_metric=self.eval_metric)
#Print feature importance
# self.set_feature_importance()
self.feature_imp = pd.Series(self.alg.booster().get_fscore()).sort_values(ascending=False)
num_print = len(self.feature_imp)
if printTopN is not None:
if printTopN != 'all':
num_print = min(printTopN,len(self.feature_imp))
self.feature_imp.iloc[:num_print].plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
plt.show(block=False)
self.model_output['Feature_Importance'] = self.feature_imp.to_string()
#Get train predictions:
self.train_predictions = self.alg.predict(self.datablock.train[self.predictors])
self.train_pred_prob = self.alg.predict_proba(self.datablock.train[self.predictors])
#Get test predictions:
self.test_predictions = self.alg.predict(self.datablock.predict[self.predictors])
self.test_pred_prob = self.alg.predict_proba(self.datablock.predict[self.predictors])
self.calc_model_characteristics(performCV)
self.printReport()
#Export the model into the models file and create a submission with the model index. This will be used for creating an ensemble.
def export_model(self, IDcol):
self.create_ensemble_dir()
filename = os.path.join(os.getcwd(),'ensemble/xgboost_models.csv')
comb_series = self.classification_output.append(self.model_output, verify_integrity=True)
if os.path.exists(filename):
models = pd.read_csv(filename)
mID = int(max(models['ModelID'])+1)
else:
mID = 1
models = pd.DataFrame(columns=comb_series.index)
comb_series['ModelID'] = mID
models = models.append(comb_series, ignore_index=True)
models.to_csv(filename, index=False, float_format="%.5f")
model_filename = os.path.join(os.getcwd(),'ensemble/xgboost_'+str(mID)+'.csv')
self.submission(IDcol, model_filename)
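# End-to-end usage sketch for this wrapper (hypothetical instance name; the
# constructor and datablock are defined earlier in this file):
#
#   clf.set_parameters({'max_depth': 4, 'learning_rate': 0.1})
#   clf.modelfit(performCV=True, useTrainCV=True, early_stopping_rounds=20)
#   clf.export_model(IDcol='ID')  # appends to ensemble/xgboost_models.csv
#                                 # and writes ensemble/xgboost_<mID>.csv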
"""
#####################################################################
##### ENSEMBLE (UNDER DEVELOPMENT)
#####################################################################
"""
#Class for creating an ensemble model using the exported files from previous classes
class Ensemble_Classification(object):
#initialize the object with target variable
def __init__(self, target, IDcol):
self.target = target
self.data = None
self.relationMatrix_chi2 = None
self.relationMatrix_diff = None
self.IDcol = IDcol
#create the ensemble data
# Inputs:
# models - dictionary with the model name as key and a list of the model numbers to be ensembled as value
# Note: all the models in the specified list should be present in the ensemble folder. Please cross-check once.
def create_ensemble_data(self, models):
self.data = None
for key, value in models.items():
# print key,value
for i in value:
fname = key + '_' + str(i)
fpath = os.path.join(os.getcwd(), 'ensemble', fname+'.csv')
tempdata = pd.read_csv(fpath)
tempdata = tempdata.rename(columns = {self.target: fname})
if self.data is None:
self.data = tempdata
else:
self.data = self.data.merge(tempdata,on=self.data.columns[0])
#get the data being used for ensemble
def get_ensemble_data(self):
return self.data
#Run a chi-square test of independence between pairs of model outputs to judge which ensemble combinations are likely to add value. Note: models with highly correlated outputs should not be combined together.
def chisq_independence(self, col1, col2, verbose = False):
contingencyTable = pd.crosstab(col1,col2,margins=True)
if len(col1)/((contingencyTable.shape[0] - 1) * (contingencyTable.shape[1] - 1)) <= 5:
return "TMC"
expected = contingencyTable.copy()
total = contingencyTable.loc["All","All"]
# print contingencyTable.index
# print contingencyTable.columns
for m in contingencyTable.index:
for n in contingencyTable.columns:
expected.loc[m,n] = contingencyTable.loc[m,"All"]*contingencyTable.loc["All",n]/float(total)
if verbose:
print('\n\nAnalysis of models: %s and %s' % (col1.name, col2.name))
print('Contingency Table:')
print(contingencyTable)
# print '\nExpected Frequency Table:'
# print expected
observed_frq = contingencyTable.iloc[:-1,:-1].values.ravel()
expected_frq = expected.iloc[:-1,:-1].values.ravel()
numless1 = len(expected_frq[expected_frq<1])
perless5 = len(expected_frq[expected_frq<5])/len(expected_frq)
#Adjust the DOF so the 1-D chisquare applies to the matrix-shaped data: shape-1 discounts the 'All' row/column, and the extra ddof works out to (rows - 1) + (cols - 1), giving (rows - 1)*(cols - 1) degrees of freedom overall
matrixadj = (contingencyTable.shape[0] - 1) + (contingencyTable.shape[1] - 1) - 2
# print matrixadj
pval = np.round(chisquare(observed_frq, expected_frq,ddof=matrixadj)[1],3)
if numless1>0 or perless5>=0.2:
return str(pval)+"*"
else:
return pval
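# Worked example (illustrative, not in the original file): for two binary
# model outputs the crosstab is 2x2 plus 'All' margins, each expected count is
# row_total * col_total / grand_total, and the ddof adjustment above reduces
# the 1-D chi-square DOF from (cells - 1) to (rows - 1) * (cols - 1):
#
#   a = pd.Series([0, 0, 1, 1, 1, 0], name='m1')
#   b = pd.Series([0, 1, 1, 1, 0, 0], name='m2')
#   ens.chisq_independence(a, b, verbose=True)  # returns a p-value, 'TMC',
#                                               # or 'p*' when counts are low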
#Create the relational matrix between models
def check_ch2(self, verbose=False):
col = self.data.columns[1:]
self.relationMatrix_chi2 = pd.DataFrame(index=col,columns=col)
for i in range(len(col)):
for j in range(i, len(col)):
if i==j:
self.relationMatrix_chi2.loc[col[i],col[j]] = 1
else:
pval = self.chisq_independence(self.data.iloc[:,i+1],self.data.iloc[:,j+1], verbose=verbose)
self.relationMatrix_chi2.loc[col[j],col[i]] = pval
self.relationMatrix_chi2.loc[col[i],col[j]] = pval
print('\n\n Relational Matrix (based on Chi-square test):')
print(self.relationMatrix_chi2)
def check_diff(self):
col = self.data.columns[1:]
self.relationMatrix_diff = pd.DataFrame(index=col,columns=col)
nrow = self.data.shape[0]
for i in range(len(col)):
for j in range(i, len(col)):
if i==j:
self.relationMatrix_diff.loc[col[i],col[j]] = '-'
else:
# print col[i],col[j]
pval = "{0:.2%}".format(sum( np.abs(self.data.iloc[:,i+1]-self.data.iloc[:,j+1]) )/float(nrow))
self.relationMatrix_diff.loc[col[j],col[i]] = pval
self.relationMatrix_diff.loc[col[i],col[j]] = pval
print('\n\n Relational Matrix (based on perc difference):')
print(self.relationMatrix_diff)
#Generate a submission for the ensembled model by combining the mentioned models.
# Inputs:
# models_to_use - list of model names to use; if None, all models will be used
# filename - the filename of the final submission
# Note: the number of models should be odd so that the mode has a clear winner; otherwise the first element is chosen
def submission(self, models_to_use=None, filename="Submission_ensemble.csv"):
#if models_to_use is None then use all, else filter:
if models_to_use is None:
data_ens = self.data
else:
data_ens = self.data[models_to_use]
def mode_ens(x):
return int(mode(x).mode[0])
ensemble_output = data_ens.apply(mode_ens,axis=1)
submission = pd.DataFrame({
self.IDcol: self.data.iloc[:,0],
self.target: ensemble_output
})
submission.to_csv(filename, index=False)
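# Usage sketch for the ensemble class (file names and model numbers are
# illustrative assumptions):
#
#   ens = Ensemble_Classification(target='Target', IDcol='ID')
#   ens.create_ensemble_data({'xgboost': [1, 2, 3]})  # reads ensemble/xgboost_<i>.csv
#   ens.check_ch2(verbose=False)  # chi-square relational matrix
#   ens.check_diff()              # percent-difference relational matrix
#   ens.submission(models_to_use=['xgboost_1', 'xgboost_2', 'xgboost_3'])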
""" | bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/mathtext.py | 1 | 108907 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email mdroe@stsci.edu, but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
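# Illustrative doctest-style examples (values follow directly from the lookup
# rules above):
#
#   >>> hex(get_unicode_index('-'))     # hyphen is promoted to U+2212 MINUS SIGN
#   '0x2212'
#   >>> hex(get_unicode_index(r'\pi'))  # TeX command resolved through tex2uni
#   '0x3c0'
#   >>> hex(get_unicode_index('x'))     # single character falls back to ord()
#   '0x78'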
def unichr_safe(index):
"""Return the Unicode character corresponding to the index,
or the replacement character if this is a narrow build of Python
and the requested character is outside the BMP."""
try:
return unichr(index)
except ValueError:
return unichr(0xFFFD)
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_filled_rect`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_filled_rect(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.iceberg, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
if rcParams['text.hinting']:
return LOAD_FORCE_AUTOHINT
else:
return LOAD_NO_HINTING
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
#print self.depth
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
self.svg_glyphs.append(
(info.font, info.fontsize, info.num, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendPath(MathtextBackend):
"""
Store information to write a mathtext rendering to the text path
machinery.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = info.num
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, self.height-y2 , x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr_safe(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(str(filename)))
self._fonts['default'] = default_font
self._fonts['regular'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None:
font = FT2Font(str(basename))
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, rcParams['mathtext.default'], 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too un-reliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
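# Worked example (arithmetic only): at fontsize=12 and dpi=72 the hardcoded
# rule above gives ((0.75 / 12.0) * 12 * 72) / 72 = 0.75, so the underline
# thickness scales linearly with both fontsize and dpi.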
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, val in self._fontmap.iteritems():
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
try:
cached_font = self._get_font(basename)
except RuntimeError:
pass
else:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
try:
cached_font = self._get_font(fontname)
except RuntimeError:
pass
else:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
# font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [('\leftparen', '('),
('\rightparent', ')'),
('\leftbrace', '{'),
('\rightbrace', '}'),
('\leftbracket', '['),
('\rightbracket', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
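# Illustrative example (read off the _size_alternatives table above): asking
# for '(' yields five progressively larger glyphs, starting in 'rm' and
# continuing through the 'ex' (cmex10) font:
#
#   >>> fonts.get_sized_alternatives_for_symbol('rm', '(')
#   [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'), ('ex', '\xb5'), ('ex', '\xc3')]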
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
new_fontname = fontname
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
try:
cached_font = self._get_font(new_fontname)
except RuntimeError:
pass
else:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname in ('it', 'regular') and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s' [U%x]" %
(new_fontname, sym.encode('ascii', 'backslashreplace'), uniindex),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'
}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, name in self._fontmap.iteritems():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if (self._sans and mapping is None and
fontname not in ('regular', 'default')):
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping.get(font_class, 'rm')
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = rcParams['mathtext.default']
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr_safe(uniindex)))
# The largest size of the radical symbol in STIX has incorrect
# metrics that cause it to be disconnected from the stem.
if sym == r'\__sqrt__':
alternatives = alternatives[:-1]
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm',
directory=self.basepath)
if filename is None:
filename = findfont('Helvetica', fontext='afm',
directory=self.basepath)
default_font = AFM(file(filename, 'r'))
default_font.fname = filename
self.fonts['default'] = default_font
self.fonts['regular'] = default_font
self.pswriter = StringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
cached_font = AFM(file(fname, 'r'))
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts drop below the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (str, unicode, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
values produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state, thickness=None):
if thickness is None:
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
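# Usage sketch (assumption): the centering above relies on the symmetric 'ss'
# glue added on both sides, which stretches and shrinks with order-1 infinity
# (see GlueSpec._types below), absorbing any leftover space equally:
#
#   row = HCentered([Char('x', state)])  # 'state' comes from the parser
#   row.hpack(width, 'exactly')          # glue takes up width - natural width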
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
height = 0
depth = 0
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
:class:`SubSuperCluster` is a sort of hack to get around the fact
that this code does not do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
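# Illustrative sketch (hypothetical call site): once a box tree has been
# built and packed, a backend invokes the singleton roughly as
#
#   ship(0, 0, box)
#
# which walks the tree via hlist_out/vlist_out and emits render calls
# (Char.render, Box.render) at absolute positions offset by (ox, oy).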
##############################################################################
# PARSER
def Error(msg):
"""
Helper function to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv \dots'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
stackrel = Group(
Suppress(Literal(r"\stackrel"))
+ ((group + group)
| Error(r"Expected \stackrel{num}{den}"))
).setParseAction(self.stackrel).setName("stackrel")
binom = Group(
Suppress(Literal(r"\binom"))
+ ((group + group)
| Error(r"Expected \binom{num}{den}"))
).setParseAction(self.binom).setName("binom")
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
rightDelimSafe = oneOf(list(self._rightDelim - set(['}'])))
genfrac = Group(
Suppress(Literal(r"\genfrac"))
+ ((Suppress(Literal('{')) +
oneOf(list(self._ambiDelim | self._leftDelim | set(['']))) +
Suppress(Literal('}')) +
Suppress(Literal('{')) +
oneOf(list(self._ambiDelim |
(self._rightDelim - set(['}'])) |
set(['', r'\}']))) +
Suppress(Literal('}')) +
Suppress(Literal('{')) +
Regex("[0-9]*(\.?[0-9]*)?") +
Suppress(Literal('}')) +
group + group + group)
| Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
).setParseAction(self.genfrac).setName("genfrac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
+ (group | Error("Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
overline = Group(
Suppress(Literal(r"\overline"))
+ (group | Error("Expected \overline{value}"))
).setParseAction(self.overline).setName("overline")
placeable <<(function
^ (c_over_c | symbol)
^ accent
^ group
^ frac
^ stackrel
^ binom
^ genfrac
^ sqrt
^ overline
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
OneOrMore(
autoDelim
^ simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
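# Illustrative trace (hypothetical): while parsing r'${\bf x}$',
# start_group() pushes a copy of the current state, the \bf token sets
# get_state().font = 'bf' on that copy, 'x' is rendered bold, and
# end_group() pops the stack, so the font change is confined to the braces.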
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('rm', 'it', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set the font to the mathtext default ('it' by default)
self.push_state()
self.get_state().font = rcParams['mathtext.default']
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
if c == "'":
c = '\prime'
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
# The first 2 entires in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent',
r'overrightarrow' : r'\rightarrow',
r'overleftarrow' : r'\leftarrow'
}
_wide_accents = set(r"widehat widetilde widebar".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def _genfrac(self, ldelim, rdelim, rule, style, num, den):
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
rule = float(rule)
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width)
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state, rule), # rule
Vbox(0, thickness * 2.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'],
'=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
result = [Hlist([vlist, Hbox(thickness * 2.)])]
if ldelim or rdelim:
if ldelim == '':
ldelim = '.'
if rdelim == '':
rdelim = '.'
elif rdelim == r'\}':
rdelim = '}'
return self._auto_sized_delimiter(ldelim, result, rdelim)
return result
def genfrac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==6)
return self._genfrac(*tuple(toks[0]))
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness, '', num, den)
def stackrel(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('', '', 0.0, '', num, den)
def binom(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('(', ')', 0.0, '', num, den)
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
depth, 'exactly')
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def overline(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==1)
body = toks[0][0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = body.height - body.shift_amount + thickness * 3.0
depth = body.depth + body.shift_amount
# Place overline above body
rightside = Vlist([Hrule(state),
Fill(),
Hlist([body])])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
depth, 'exactly')
hlist = Hlist([rightside])
return [hlist]
def _auto_sized_delimiter(self, front, middle, back):
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle)
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
return self._auto_sized_delimiter(front, middle.asList(), back)
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'path' : MathtextBackendPath,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
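# Illustrative (hypothetical) usage showing the per-instance cache at work:
#
#   parser = MathTextParser('agg')
#   res1 = parser.parse(r'$\frac{a}{b}$', dpi=100)  # parsed, then cached
#   res2 = parser.parse(r'$\frac{a}{b}$', dpi=100)  # returned from the cache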
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha mask array of the
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxMx4 uint8 RGBA array of the
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
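# Illustrative (hypothetical) usage:
#
#   from matplotlib.font_manager import FontProperties
#   math_to_image(r'$x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a}$',
#                 'quadratic.png', prop=FontProperties(size=14), dpi=120)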
| gpl-2.0 |
afronski/playground-notes | introduction-to-big-data-with-apache-spark/solutions/lab2_apache_log_student.py | 1 | 38489 |
# coding: utf-8
# version 1.0.1
# # **Web Server Log Analysis with Apache Spark**
#
# ####This lab will demonstrate how easy it is to perform web server log analysis with Apache Spark.
#
# ####Server log analysis is an ideal use case for Spark. Server logs are a very large, common data source and contain a rich set of information. Spark allows you to store your logs in files on disk cheaply, while still providing a quick and simple way to perform data analysis on them. This homework will show you how to use Apache Spark on real-world text-based production logs and fully harness the power of that data. Log data comes from many sources, such as web, file, and compute servers, application logs, and user-generated content, and can be used for monitoring servers, improving business and customer intelligence, building recommendation systems, fraud detection, and much more.
# ### How to complete this assignment
#
# ####This assignment is broken up into sections with bite-sized examples for demonstrating Spark functionality for log processing. For each problem, you should start by thinking about the algorithm that you will use to *efficiently* process the log in a parallel, distributed manner. This means using the various [RDD](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) operations along with [`lambda` functions](https://docs.python.org/2/tutorial/controlflow.html#lambda-expressions) that are applied at each worker.
#
# ####This assignment consists of 4 parts:
# #### *Part 1*: Apache Web Server Log file format
# #### *Part 2*: Sample Analyses on the Web Server Log File
# #### *Part 3*: Analyzing Web Server Log File
# #### *Part 4*: Exploring 404 Response Codes
# ### **Part 1: Apache Web Server Log file format**
# ####The log files that we use for this assignment are in the [Apache Common Log Format (CLF)](http://httpd.apache.org/docs/1.3/logs.html#common). The log file entries produced in CLF will look something like this:
# `127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839`
#
# ####Each part of this log entry is described below.
# * `127.0.0.1`
# ####This is the IP address (or host name, if available) of the client (remote host) which made the request to the server.
#
# * `-`
# ####The "hyphen" in the output indicates that the requested piece of information (user identity from remote machine) is not available.
#
# * `-`
# ####The "hyphen" in the output indicates that the requested piece of information (user identity from local logon) is not available.
#
# * `[01/Aug/1995:00:00:01 -0400]`
# ####The time that the server finished processing the request. The format is:
# `[day/month/year:hour:minute:second timezone]`
# * ####day = 2 digits
# * ####month = 3 letters
# * ####year = 4 digits
# * ####hour = 2 digits
# * ####minute = 2 digits
# * ####second = 2 digits
# * ####zone = (\+ | \-) 4 digits
#
# * `"GET /images/launch-logo.gif HTTP/1.0"`
# ####This is the first line of the request string from the client. It consists of three components: the request method (e.g., `GET`, `POST`, etc.), the endpoint (a [Uniform Resource Identifier](http://en.wikipedia.org/wiki/Uniform_resource_identifier)), and the client protocol version.
#
# * `200`
# ####This is the status code that the server sends back to the client. This information is very valuable, because it reveals whether the request resulted in a successful response (codes beginning in 2), a redirection (codes beginning in 3), an error caused by the client (codes beginning in 4), or an error in the server (codes beginning in 5). The full list of possible status codes can be found in the HTTP specification ([RFC 2616](https://www.ietf.org/rfc/rfc2616.txt) section 10).
#
# * `1839`
# ####The last entry indicates the size of the object returned to the client, not including the response headers. If no content was returned to the client, this value will be "-" (or sometimes 0).
#
# ####Note that log files contain information supplied directly by the client, without escaping. Therefore, it is possible for malicious clients to insert control-characters in the log files, *so care must be taken in dealing with raw logs.*
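# ####For example (illustrative breakdown), the sample entry above decomposes into: host `127.0.0.1`, remote identity `-`, local identity `-`, timestamp `[01/Aug/1995:00:00:01 -0400]`, request line `"GET /images/launch-logo.gif HTTP/1.0"` (method `GET`, endpoint `/images/launch-logo.gif`, protocol `HTTP/1.0`), response code `200`, and content size `1839` bytes.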
#
# ### NASA-HTTP Web Server Log
# ####For this assignment, we will use a data set from NASA Kennedy Space Center WWW server in Florida. The full data set is freely available (http://ita.ee.lbl.gov/html/contrib/NASA-HTTP.html) and contains two months of HTTP requests. We are using a subset that only contains several days' worth of requests.
# ### **(1a) Parsing Each Log Line**
# ####Using the CLF as defined above, we create a regular expression pattern to extract the nine fields of the log line using the Python regular expression [`search` function](https://docs.python.org/2/library/re.html#regular-expression-objects). The function returns a pair consisting of a Row object and 1. If the log line fails to match the regular expression, the function returns a pair consisting of the log line string and 0. A '-' value in the content size field is cleaned up by substituting it with 0. The function converts the log line's date string into a Python `datetime` object using the given `parse_apache_time` function.
# In[1]:
import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
# In[2]:
# A regular expression pattern to extract fields from the log line
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
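# Capture groups, in order (matching the match.group(n) calls above):
# 1=host, 2=client identd, 3=user id, 4=timestamp, 5=method,
# 6=endpoint, 7=protocol, 8=response code, 9=content size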
# ### **(1b) Configuration and Initial RDD Creation**
# ####We are ready to specify the input log file and create an RDD containing the parsed log file data. The log file has already been downloaded for you.
#
# ####To create the primary RDD that we'll use in the rest of this assignment, we first load the text file using [`sc.textfile(logFile)`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext.textFile) to convert each line of the file into an element in an RDD.
# ####Next, we use [`map(parseApacheLogLine)`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.map) to apply the parse function to each element (that is, a line from the log file) in the RDD and turn each line into a pair [`Row` object](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.Row).
# ####Finally, we cache the RDD in memory since we'll use it throughout this notebook.
# In[4]:
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid loglines: %d' % failed_logs_count
for line in failed_logs.take(20):
print 'Invalid logline: %s' % line
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
# ### **(1c) Data Cleaning**
# #### Notice that there are a large number of log lines that failed to parse. Examine the sample of invalid lines and compare them to the correctly parsed line; an example is included below. Based on your observations, alter the `APACHE_ACCESS_LOG_PATTERN` regular expression below so that the failed lines will parse correctly, and press `Shift-Enter` to rerun `parseLogs()`.
#
# `127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839`
#
# #### If you are not familiar with the Python regular expression [`search` function](https://docs.python.org/2/library/re.html#regular-expression-objects), now would be a good time to check up on the [documentation](https://developers.google.com/edu/python/regular-expressions). One tip that might be useful is to use an online tester like http://pythex.org or http://www.pythonregex.com. To use it, copy and paste the regular expression string below (located between the single quotes ') and test it against one of the 'Invalid logline' entries above.
# In[6]:
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[\+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
# In[7]:
# TEST Data cleaning (1c)
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
# ### **Part 2: Sample Analyses on the Web Server Log File**
#
# ####Now that we have an RDD containing the log file as a set of Row objects, we can perform various analyses.
#
# #### **(2a) Example: Content Size Statistics**
#
# ####Let's compute some statistics about the sizes of content being returned by the web server. In particular, we'd like to know what are the average, minimum, and maximum content sizes.
#
# ####We can compute the statistics by applying a `map` to the `access_logs` RDD. The `lambda` function we want for the map extracts the `content_size` field from each Row. The map produces a new RDD containing only the `content_sizes` (one element for each Row object in the `access_logs` RDD). To compute the minimum and maximum statistics, we can use the [`min()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.min) and [`max()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.max) functions on the new RDD. We can compute the average statistic by using the [`reduce`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduce) function with a `lambda` function that sums the two inputs, which represent two elements from the new RDD that are being reduced together. The result of the `reduce()` is the total content size from the log, which we then divide by the number of requests, as determined using the [`count()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.count) function on the new RDD.
# In[8]:
# Calculate statistics based on the content size.
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: %s' % (
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
# #### **(2b) Example: Response Code Analysis**
# ####Next, let's look at the response codes that appear in the log. As with the content size analysis, first we create a new RDD by using a `lambda` function to extract the `response_code` field from the `access_logs` RDD. The difference here is that we will use a [pair tuple](https://docs.python.org/2/tutorial/datastructures.html?highlight=tuple#tuples-and-sequences) instead of just the field itself. Using a pair tuple consisting of the response code and 1 will let us count how many records have a particular response code. Using the new RDD, we perform a [`reduceByKey`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduceByKey) function. `reduceByKey` performs a reduce on a per-key basis by applying the `lambda` function to each pair of elements with the same key. We use the simple `lambda` function of adding the two values. Then, we cache the resulting RDD and create a list by using the [`take`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.take) function.
# In[9]:
# Response Code to Count
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found %d response codes' % len(responseCodeToCountList)
print 'Response Code Counts: %s' % responseCodeToCountList
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
# #### **(2c) Example: Response Code Graphing with `matplotlib`**
# ####Now, let's visualize the results from the last example using [`matplotlib`](http://matplotlib.org/). First we need to extract the labels and fractions for the graph. We do this with two separate `map` functions with `lambda` functions. The first `map` function extracts a list of the response code values, and the second `map` function extracts a list of the per response code counts divided by the total size of the access logs. Next, we create a figure with the `figure()` constructor and use the `pie()` method to create the pie plot.
# In[10]:
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
# In[11]:
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '%.0f%%' % value
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is too small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
pass
# #### **(2d) Example: Frequent Hosts**
# ####Let's look at hosts that have accessed the server multiple times (e.g., more than ten times). As with the response code analysis in (2b), first we create a new RDD by using a `lambda` function to extract the `host` field from the `access_logs` RDD, using a pair tuple consisting of the host and 1, which will let us count how many records were created by a particular host. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then filter the result based on the count of accesses by each host (the second element of each pair) being greater than ten. Next, we extract the host name by performing a `map` with a `lambda` function that returns the first element of each pair. Finally, we extract 20 elements from the resulting RDD - *note that the choice of which elements are returned is not guaranteed to be deterministic.*
# In[12]:
# Any hosts that has accessed the server more than 10 times.
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: %s' % hostsPick20
# An example: [u'204.120.34.185', u'204.243.249.9', u'slip1-32.acs.ohio-state.edu', u'lapdog-14.baylor.edu', u'199.77.67.3', u'gs1.cs.ttu.edu', u'haskell.limbex.com', u'alfred.uib.no', u'146.129.66.31', u'manaus.bologna.maraut.it', u'dialup98-110.swipnet.se', u'slip-ppp02.feldspar.com', u'ad03-053.compuserve.com', u'srawlin.opsys.nwa.com', u'199.202.200.52', u'ix-den7-23.ix.netcom.com', u'151.99.247.114', u'w20-575-104.mit.edu', u'205.25.227.20', u'ns.rmc.com']
# #### **(2e) Example: Visualizing Endpoints**
# ####Now, let's visualize the number of hits to endpoints (URIs) in the log. To perform this task, we first create a new RDD by using a `lambda` function to extract the `endpoint` field from the `access_logs` RDD, using a pair tuple consisting of the endpoint and 1, which will let us count how many hits each endpoint received. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then cache the results.
#
# ####Next we visualize the results using `matplotlib`. We previously imported the `matplotlib.pyplot` library, so we do not need to import it again. We perform two separate `map` functions with `lambda` functions. The first `map` function extracts a list of endpoint values, and the second `map` function extracts a list of the number of visits per endpoint. Next, we create a figure with the `figure()` constructor, set various features of the plot (axis limits, grid lines, and labels), and use the `plot()` method to create the line plot.
# In[13]:
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
pass
# #### **(2f) Example: Top Endpoints**
# ####For the final example, we'll look at the top endpoints (URIs) in the log. To determine them, we first create a new RDD by using a `lambda` function to extract the `endpoint` field from the `access_logs` RDD, using a pair tuple consisting of the endpoint and 1, which will let us count how many hits each endpoint received. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then extract the top ten endpoints by performing a [`takeOrdered`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.takeOrdered) with a value of 10 and a `lambda` function that multiplies the count (the second element of each pair) by -1, so that the endpoints with the highest counts come first.
# In[14]:
# Top Endpoints
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: %s' % topEndpoints
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20292)], 'incorrect Top Ten Endpoints'
# ### **Part 3: Analyzing Web Server Log File**
#
# ####Now it is your turn to perform analyses on web server log files.
# #### **(3a) Exercise: Top Ten Error Endpoints**
# ####What are the top ten endpoints which did not have return code 200? Create a sorted list containing the top ten endpoints and the number of times that they were accessed with a non-200 return code.
#
# ####Think about the steps that you need to perform to determine which endpoints did not have a 200 return code, how you will uniquely count those endpoints, and sort the list.
#
# ####You might want to refer back to the previous Lab (Lab 1 Word Count) for insights.
# In[15]:
not200 = access_logs.filter(lambda log: log.response_code != 200)
endpointCountPairTuple = not200.map(lambda log: (log.endpoint, 1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a, b : a + b)
topTenErrURLs = endpointSum.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten failed URLs: %s' % topTenErrURLs
# In[16]:
# TEST Top ten error endpoints (3a)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
# #### **(3b) Exercise: Number of Unique Hosts**
# ####How many unique hosts are there in the entire log?
#
# ####Think about the steps that you need to perform to count the number of different hosts in the log.
# In[19]:
hosts = access_logs.map(lambda log: log.host)
uniqueHosts = hosts.countByValue()
uniqueHostCount = len(uniqueHosts)
print 'Unique hosts: %d' % uniqueHostCount
# In[20]:
# TEST Number of unique hosts (3b)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
# #### **(3c) Exercise: Number of Unique Daily Hosts**
# ####For an advanced exercise, let's determine the number of unique hosts in the entire log on a day-by-day basis. This computation will give us the number of unique daily hosts. We'd like a list sorted by increasing day of the month which includes the day of the month and the associated number of unique hosts for that day. Make sure you cache the resulting RDD `dailyHosts` so that we can reuse it in the next exercise.
#
# ####Think about the steps that you need to perform to count the number of different hosts that make requests *each* day.
# ####*Since the log only covers a single month, you can ignore the month.*
# In[74]:
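# combineByKey helpers: accumulate the set of distinct hosts seen on each day
# (createCombiner, mergeValue, and mergeCombiners, respectively).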
def empty(a):
return set([ a ])
def add(acc, a):
acc.update([ a ])
return acc
def merge(acc1, acc2):
return acc1.union(acc2)
dayToHostPairTuple = access_logs.map(lambda log: (log.date_time.day, log.host))
dayGroupedHosts = dayToHostPairTuple.combineByKey(empty, add, merge)
dayHostCount = dayGroupedHosts.map(lambda (day, hosts): (day, len(hosts)))
dailyHosts = dayHostCount.sortByKey().cache()
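# An equivalent (untested here) formulation de-duplicates the (day, host)
# pairs first and then counts per day:
#   dailyHosts = (dayToHostPairTuple.distinct()
#                 .map(lambda (day, host): (day, 1))
#                 .reduceByKey(lambda a, b: a + b)
#                 .sortByKey().cache())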
dailyHostsList = dailyHosts.take(30)
print 'Unique hosts per day: %s' % dailyHostsList
# In[75]:
# TEST Number of unique daily hosts (3c)
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
# #### **(3d) Exercise: Visualizing the Number of Unique Daily Hosts**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" graph of the number of unique hosts by day.
# #### `daysWithHosts` should be a list of days and `hosts` should be a list of the number of unique hosts for each corresponding day.
# #### * How could you convert an RDD into a list? See the [`collect()` method](http://spark.apache.org/docs/latest/api/python/pyspark.html?highlight=collect#pyspark.RDD.collect)*
# In[50]:
daysWithHosts = dailyHosts.map(lambda (day, hosts): day).collect()
hosts = dailyHosts.map(lambda (day, hosts): hosts).collect()
# In[51]:
# TEST Visualizing unique daily hosts (3d)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
# In[52]:
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
pass
# #### **(3e) Exercise: Average Number of Daily Requests per Host**
# ####Next, let's determine the average number of requests on a day-by-day basis. We'd like a list sorted by increasing day of the month, with the associated average number of requests per host for that day. Make sure you cache the resulting RDD `avgDailyReqPerHost` so that we can reuse it in the next exercise.
# ####To compute the average number of requests per host, get the total number of requests across all hosts and divide that by the number of unique hosts.
# ####*Since the log only covers a single month, you can skip checking for the month.*
# ####*Also to keep it simple, when calculating the approximate average use the integer value - you do not need to upcast to float*
# In[80]:
reqsPerDay = access_logs.map(lambda log: (log.date_time.day, 1)).reduceByKey(lambda a, b: a + b).sortByKey()
groupedByDay = reqsPerDay.join(dailyHosts)
avgDailyReqPerHost = groupedByDay.map(lambda (day, (r, h)): (day, r / h)).sortByKey().cache()
avgDailyReqPerHostList = avgDailyReqPerHost.take(30)
print 'Average number of daily requests per host is %s' % avgDailyReqPerHostList
# In[81]:
# TEST Average number of daily requests per hosts (3e)
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cached')
# #### **(3f) Exercise: Visualizing the Average Daily Requests per Unique Host**
# ####Using the result `avgDailyReqPerHost` from the previous exercise, use `matplotlib` to plot a "Line" graph of the average daily requests per unique host by day.
# #### `daysWithAvg` should be a list of days and `avgs` should be a list of average daily requests per unique host for each corresponding day.
# In[83]:
daysWithAvg = avgDailyReqPerHost.map(lambda (day, r): day).collect()
avgs = avgDailyReqPerHost.map(lambda (day, r): r).collect()
# In[84]:
# TEST Average Daily Requests per Unique Host (3f)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
# In[85]:
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
pass
# ### **Part 4: Exploring 404 Response Codes**
#
# ####Let's drill down and explore the error 404 response code records. 404 errors are returned when an endpoint is not found by the server (i.e., a missing page or object).
# #### **(4a) Exercise: Counting 404 Response Codes**
# #### Create an RDD containing only log records with a 404 response code. Make sure you `cache()` the RDD `badRecords` as we will use it in the rest of this exercise.
#
# #### How many 404 records are in the log?
# In[88]:
badRecords = (access_logs
.filter(lambda log: log.response_code == 404)
.cache())
print 'Found %d 404 URLs' % badRecords.count()
# In[89]:
# TEST Counting 404 (4a)
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
# #### **(4b) Exercise: Listing 404 Response Code Records**
# ####Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of up to 40 **distinct** endpoints that generated 404 errors - *no endpoint should appear more than once in your list.*
# In[93]:
badEndpoints = badRecords.map(lambda log: log.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: %s' % badUniqueEndpointsPick40
# In[94]:
# TEST Listing 404 records (4b)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
# #### **(4c) Exercise: Listing the Top Twenty 404 Response Code Endpoints**
# ####Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of the top twenty endpoints that generated the most 404 errors.
# ####*Remember, top endpoints should be in sorted order*
# In[96]:
badEndpointsCountPairTuple = badRecords.map(lambda log: (log.endpoint, 1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a, b: a + b)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, lambda s: -1 * s[1])
print 'Top Twenty 404 URLs: %s' % badEndpointsTop20
# In[97]:
# TEST Top twenty 404 URLs (4c)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif>', 43), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
# #### **(4d) Exercise: Listing the Top Twenty-five 404 Response Code Hosts**
# ####Instead of looking at the endpoints that generated 404 errors, let's look at the hosts that encountered 404 errors. Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of the top twenty-five hosts that generated the most 404 errors.
# In[98]:
errHostsCountPairTuple = badRecords.map(lambda log: (log.host, 1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a, b: a + b)
errHostsTop25 = errHostsSum.takeOrdered(25, lambda s: -1 * s[1])
print 'Top 25 hosts that generated errors: %s' % errHostsTop25
# In[99]:
# TEST Top twenty-five 404 response code hosts (4d)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
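# The test above compares sets rather than ordered lists: hosts that are tied
# on their 404 count can come back from takeOrdered in any relative order, so
# only membership is checked.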
# #### **(4e) Exercise: Listing 404 Response Codes per Day**
# ####Let's explore the 404 records temporally. Break down the 404 requests by day (`cache()` the RDD `errDateSorted`) and get the daily counts sorted by day as a list.
# ####*Since the log only covers a single month, you can ignore the month in your checks.*
# In[103]:
errDateCountPairTuple = badRecords.map(lambda log: (log.date_time.day, 1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b : a + b)
errDateSorted = errDateSum.sortByKey().cache()
errByDate = errDateSorted.collect()
print '404 Errors by day: %s' % errByDate
# In[104]:
# TEST 404 response codes per day (4e)
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
# #### **(4f) Exercise: Visualizing the 404 Response Codes by Day**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" or "Bar" graph of the 404 response codes by day.
# In[105]:
daysWithErrors404 = errDateSorted.map(lambda (day, num): day).collect()
errors404ByDay = errDateSorted.map(lambda (day, num): num).collect()
# In[106]:
# TEST Visualizing the 404 Response Codes by Day (4f)
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
# In[107]:
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
# #### **(4g) Exercise: Top Five Days for 404 Response Codes**
# ####Using the RDD `errDateSorted` you cached in part (4e), what are the top five days for 404 response codes and the corresponding counts of 404 response codes?
# In[110]:
topErrDate = errDateSorted.takeOrdered(5, lambda s: s[1] * -1)
print 'Top Five dates for 404 requests: %s' % topErrDate
# In[111]:
# TEST Five dates for 404 requests (4g)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
# #### **(4h) Exercise: Hourly 404 Response Codes**
# ####Using the RDD `badRecords` you cached in part (4a), create an RDD containing how many requests had a 404 return code for each hour of the day (midnight starts at 0), sorted by increasing hour. Cache the resulting RDD `hourRecordsSorted` and print it as a list.
# In[112]:
hourCountPairTuple = badRecords.map(lambda log: (log.date_time.hour, 1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a + b)
hourRecordsSorted = hourRecordsSum.sortByKey().cache()
errHourList = hourRecordsSorted.collect()
print 'Top hours for 404 requests: %s' % errHourList
# In[113]:
# TEST Hourly 404 response codes (4h)
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
# #### **(4i) Exercise: Visualizing the 404 Response Codes by Hour**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" or "Bar" graph of the 404 response codes by hour.
# In[114]:
hoursWithErrors404 = hourRecordsSorted.map(lambda (day, num): day).collect()
errors404ByHours = hourRecordsSorted.map(lambda (day, num): num).collect()
# In[115]:
# TEST Visualizing the 404 Response Codes by Hour (4i)
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
# In[116]:
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
pass
| mit |
amolkahat/pandas | pandas/core/frame.py | 1 | 293955 | # pylint: disable=E1101
# pylint: disable=W0212,W0703,W0622
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
import collections
import functools
import itertools
import sys
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas._libs import lib, algos as libalgos
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature,
deprecate_kwarg)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas import compat
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, PY36, raise_with_traceback,
string_and_binary_types)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
construct_1d_arraylike_from_scalar,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetimetz,
is_datetime64_any_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
ensure_float64,
ensure_int64,
ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.concat import _get_sliced_frame_result_type
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCMultiIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms
from pandas.core import common as com
from pandas.core import nanops
from pandas.core import ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.config import get_option
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, ensure_index,
ensure_index_from_sequences)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.io.formats import console
from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
See Also
--------
merge_ordered : merge with optional filling/interpolation.
merge_asof : merge on nearest keys.
DataFrame.join : similar method using indices.
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
_accessors = set()
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif (isinstance(data, compat.Iterable)
and not isinstance(data, string_and_binary_types)):
if not isinstance(data, compat.Sequence):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: {e}'.format(e=e))
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = self._init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
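# Summary of the constructor dispatch above (descriptive comment only):
# BlockManager input passes through _init_mgr; dicts go to _init_dict; masked
# arrays are filled and handled as ndarrays; ndarray/Series/Index input goes
# to _init_ndarray (or _init_dict for structured dtypes); other non-string
# iterables are materialized into lists; a bare scalar is only accepted when
# both index and columns are given.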
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isnull()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or np.issubdtype(dtype, np.flexible):
# 1783
nan_dtype = object
else:
nan_dtype = dtype
v = construct_1d_arraylike_from_scalar(np.nan, len(index),
nan_dtype)
arrays.loc[missing] = [v] * missing.sum()
else:
keys = com.dict_keys_to_ordered_list(data)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
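# _init_dict above has two paths (descriptive comment only): when columns are
# given, the dict is reindexed against them and missing keys become all-NaN
# columns unless an integer dtype was requested; otherwise the dict's own key
# order (via dict_keys_to_ordered_list) defines the columns.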
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif (is_datetimetz(values) or is_extension_array_dtype(values)):
# GH19157
if columns is None:
columns = [0]
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '{dtype}' (Exception "
"was: {orig})".format(dtype=dtype,
orig=orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case of a non-interactive session, no
boundaries apply.
ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if (get_option('display.width') is not None or
console.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # max_rows is set, so truncate before rendering
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split('\n'))
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if console.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
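# iteritems above falls back to positional _ixs when column labels are
# duplicated, since label-based _get_item_cache would be ambiguous then.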
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
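# With rename=True, collections.namedtuple replaces invalid or duplicate
# field names with positional ones, e.g.
#   namedtuple('Row', ['a', 'a', 'class'], rename=True)._fields
# gives ('a', '_1', '_2'), which is why odd column labels survive itertuples.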
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects. Can also be
called using `self @ other` in Python >= 3.5.
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
'{s} vs {r}'.format(s=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
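# Usage sketch for dot (illustrative comment, not part of the original source):
#   df = pd.DataFrame([[1, 2], [3, 4]])
#   s = pd.Series([1, 1])
#   df.dot(s)   # -> Series([3, 7]); columns are aligned to s's index first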
def __matmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.dot(other)
def __rmatmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.T.dot(np.transpose(other)).T
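# __rmatmul__ relies on the transpose identity other @ self ==
# (self.T @ other.T).T, so the reflected operation can reuse dot().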
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
pandas.DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame
DataFrame : DataFrame object creation using constructor
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.Mapping
Return a collections.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1.0, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2.0, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
com.maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, com.maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
if not self.index.is_unique:
raise ValueError(
"DataFrame index must be unique for orient='index'."
)
return into_c((t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples())
else:
raise ValueError("orient '{o}' not understood".format(o=orient))
def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', private_key=None,
auth_local_webserver=False, table_schema=None, location=None,
progress_bar=True, verbose=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
private_key : str, optional
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame
columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
verbose : bool, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
pandas.read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth,
if_exists=if_exists, private_key=private_key,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, verbose=verbose)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
index_data = [arrays[i] for i in to_remove]
result_index = ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
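# Usage sketch for from_records (illustrative, not from the original source):
#   pd.DataFrame.from_records([(1, 'a'), (2, 'b')], columns=['x', 'y'])
# builds a two-column frame; passing index='x' would instead move that field
# into the index and drop it from the columns, per the exclude logic above.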
def to_records(self, index=True, convert_datetime64=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: convert structured or record ndarray
to DataFrame.
numpy.recarray: ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
warnings.warn("The 'convert_datetime64' parameter is "
"deprecated and will be removed in a future "
"version",
FutureWarning, stacklevel=2)
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""Construct a dataframe from a list of tuples
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
warnings.warn("from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning, stacklevel=2)
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
# re-raise the original error rather than silently
# swallowing it and returning None
raise
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""Read CSV file.
.. deprecated:: 0.21.0
Use :func:`pandas.read_csv` instead.
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write multi-index columns as a list of tuples (if True)
or in the new, expanded format (if False)
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
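Examples
--------
A minimal sketch, assuming a file ``data.csv`` whose first column is
the index:
>>> df = pd.DataFrame.from_csv('data.csv')  # doctest: +SKIP
The preferred equivalent:
>>> df = pd.read_csv('data.csv', index_col=0, parse_dates=True)  # doctest: +SKIP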
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
Implement the sparse version of the DataFrame, meaning that any data
matching a specific value is omitted in the representation.
The sparse DataFrame allows for more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
.. deprecated:: 0.20.0
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
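Examples
--------
A minimal sketch with a 2-level MultiIndex (illustrative data);
the resulting panel shape is (items, major_axis, minor_axis):
>>> midx = pd.MultiIndex.from_product([['a', 'b'], [0, 1]])
>>> df = pd.DataFrame({'x': range(4)}, index=midx)
>>> panel = df.to_panel()  # doctest: +SKIP
>>> panel.shape  # doctest: +SKIP
(1, 2, 2)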
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export Stata binary dta files.
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}
Version to use in the output dta file. Version 114 can be
read by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
pandas.read_stata : Import Stata data files
pandas.io.stata.StataWriter : low-level writer for Stata data files
pandas.io.stata.StataWriter117 : low-level writer for version 117 files
Examples
--------
>>> data.to_stata('./data_file.dta')
Or with dates
>>> data.to_stata('./date_data_file.dta', {2 : 'tw'})
Alternatively you can create an instance of the StataWriter class
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
With dates:
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file()
def to_feather(self, fname):
"""
Write out the binary Feather format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
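Examples
--------
A minimal round trip, assuming the optional feather dependency
(pyarrow) is installed:
>>> df = pd.DataFrame({'a': [1, 2]})
>>> df.to_feather('df.feather')  # doctest: +SKIP
>>> pd.read_feather('df.feather')  # doctest: +SKIP
a
0 1
1 2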
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
def to_parquet(self, fname, engine='auto', compression='snappy',
index=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
String file path.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip', compression='gzip')
>>> pd.read_parquet('df.parquet.gzip')
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, index=index, **kwargs)
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1' : [1, 2, 3], 'col2' : [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None, table_id=None):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
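Examples
--------
A minimal sketch:
>>> df = pd.DataFrame({'A': [1, 2]})
>>> html = df.to_html()
>>> html.startswith('<table')
True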
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal, table_id=table_id)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Prints a summary of the column count and dtypes, but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w", encoding="utf-8") as f:
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
if verbose:
_verbose_repr()
elif verbose is False:  # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(compat.iteritems(counts))]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in the returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
sizes : Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
pandas.Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 (1+0j) 1 True
1 1 1.0 (1+0j) 1 True
2 1 1.0 (1+0j) 1 True
3 1 1.0 (1+0j) 1 True
4 1 1.0 (1+0j) 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = com._unpickle_array(cols)
index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
# old unpickling
(vals, idx, cols), object_state = state
index = com._unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=com._unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
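Examples
--------
A minimal sketch of the deprecated call and its ``.at`` replacement
(illustrative data):
>>> df = pd.DataFrame({'A': [1, 2]}, index=['x', 'y'])
>>> df.get_value('x', 'A')  # doctest: +SKIP
1
>>> df.at['x', 'A']
1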
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""Put single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
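Examples
--------
A minimal sketch of the deprecated call and its ``.at`` replacement
(illustrative data):
>>> df = pd.DataFrame({'A': [1, 2]}, index=['x', 'y'])
>>> df.set_value('x', 'A', 10)  # doctest: +SKIP
>>> df.at['x', 'A'] = 10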
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (in other words, a not-found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._convert_to_indexer(key, axis=1,
raise_missing=True)
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
pandas.eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed, though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
klass = _get_sliced_frame_result_type(values, self)
return klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
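Examples
--------
A minimal sketch:
>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.insert(1, 'B', [3, 4])
>>> df
A B
0 1 3
1 2 4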
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged :: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
from pandas.core.series import _sanitize_index
# Explicitly copy here, instead of in _sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = _sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
The found values
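Examples
--------
A minimal sketch with illustrative data:
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
>>> df.lookup(['x', 'y'], ['A', 'B'])
array([1, 4])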
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=None, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
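# Calling-convention sketch (hypothetical frame): df.reindex(['a', 'b']) and
# df.reindex(index=['a', 'b']) both normalize to kwargs={'index': ['a', 'b']}
# via validate_axis_style_args before delegating to NDFrame.reindex.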
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index, columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
dropped : pandas.DataFrame
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns
Series.drop : Return Series with specified index labels removed.
Raises
------
KeyError
If none of the labels are found in the selected axis
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3,0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super(DataFrame, self).drop(labels=labels, axis=axis,
index=index, columns=columns,
level=level, inplace=inplace,
errors=errors)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
method=method)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Returns
-------
dataframe : DataFrame
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
...                    'sale': [55, 40, 84, 31]})
>>> df
   month  sale  year
0 1 55 2012
1 4 40 2014
2 7 84 2013
3 10 31 2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
missing = []
for col in keys:
if (is_scalar(col) or isinstance(col, tuple)) and col in self:
# tuples can be either column keys or list-likes
# if they are valid column keys, everything is fine
continue
elif is_scalar(col) and col not in self:
# tuples that are not column keys are considered list-like,
# not considered missing
missing.append(col)
elif (not is_list_like(col, allow_sets=False)
or getattr(col, 'ndim', 1) > 1):
raise TypeError('The parameter "keys" may only contain a '
'combination of valid column keys and '
'one-dimensional list-likes')
if missing:
raise KeyError('{}'.format(missing))
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif (is_list_like(col)
and not (isinstance(col, tuple) and col in self)):
# all other list-likes (but avoid valid column keys)
col = list(col)  # ensure iterators do not get read twice etc.
arrays.append(col)
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError('Index has duplicate keys: {dup}'.format(
dup=duplicates))
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
reset : DataFrame
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
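# Quick sanity sketch (hypothetical frame; isnull/notnull are aliases of
# isna/notna):
#   >>> pd.DataFrame({'a': [1.0, np.nan]}).isna()['a'].tolist()
#   [False, True]
#   >>> pd.DataFrame({'a': [1.0, np.nan]}).notna()['a'].tolist()
#   [True, False]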
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
# GH20987
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
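Examples
--------
A small illustration (hypothetical frame):
>>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
>>> df.drop_duplicates()
   a  b
0  1  x
2  2  y
>>> df.drop_duplicates(keep='last')
   a  b
1  1  x
2  2  y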
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (~duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[~duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
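Examples
--------
A minimal sketch (hypothetical frame):
>>> df = pd.DataFrame({'a': [1, 1, 2]})
>>> df.duplicated()
0    False
1     True
2    False
dtype: bool
>>> df.duplicated(keep=False)
0     True
1     True
2    False
dtype: bool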
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series()
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self._get_label_or_level_values(x, axis=axis)
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
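# Multi-key sketch (hypothetical frame): df.sort_values(['a', 'b'],
# ascending=[True, False]) builds one lexsort indexer over both key columns
# in a single pass rather than sorting twice.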
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_index.__doc__)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order).
.. deprecated:: 0.20.0
Use :meth:`DataFrame.sort_index`
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
  selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2],
... 'b': list('abdcef'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 11 c 3.0
4 8 e 4.0
5 2 f 9.0
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "a".
>>> df.nlargest(3, 'a')
a b c
3 11 c 3.0
1 10 b 2.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'a', keep='last')
a b c
3 11 c 3.0
1 10 b 2.0
4 8 e 4.0
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'a', keep='all')
a b c
3 11 c 3.0
1 10 b 2.0
2 8 d NaN
4 8 e 4.0
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nlargest(3, ['a', 'c'])
a b c
4 8 e 4.0
3 11 c 3.0
1 10 b 2.0
Attempting to use ``nlargest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nlargest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nlargest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
  selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2],
... 'b': list('abdcef'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 11 c 3.0
4 8 e 4.0
5 2 f 9.0
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "a".
>>> df.nsmallest(3, 'a')
a b c
0 1 a 1.0
5 2 f 9.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'a', keep='last')
a b c
0 1 a 1.0
5 2 f 9.0
4 8 e 4.0
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'a', keep='all')
a b c
0 1 a 1.0
5 2 f 9.0
2 8 d NaN
4 8 e 4.0
To order by the smallest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['a', 'c'])
a b c
0 1 a 1.0
5 2 f 9.0
4 8 e 4.0
Attempting to use ``nsmallest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nsmallest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nsmallest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
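Examples
--------
A minimal sketch (hypothetical two-level index):
>>> midx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
>>> df = pd.DataFrame({'x': [10, 20]}, index=midx)
>>> df.swaplevel(0, 1).index.tolist()
[(1, 'a'), (2, 'b')]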
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
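Examples
--------
A small sketch (hypothetical two-level index):
>>> midx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
...                                  names=['outer', 'inner'])
>>> df = pd.DataFrame({'x': [10, 20]}, index=midx)
>>> list(df.reorder_levels(['inner', 'outer']).index.names)
['inner', 'outer']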
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
if ops.should_series_dispatch(this, other, func):
# iterate over columns
return ops.dispatch_to_series(this, other, _arith_op)
else:
result = _arith_op(this.values, other.values)
return self._constructor(result,
index=new_index, columns=new_columns,
copy=False)
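# Dispatch sketch (hypothetical frames): with mixed dtypes, df1 + df2 runs
# column-by-column through dispatch_to_series; homogeneous frames apply
# _arith_op once over the full 2-D values block.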
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
assert left.index.equals(right.index)
if left._is_mixed_type or right._is_mixed_type:
# operate column-wise; avoid costly object-casting in `.values`
return ops.dispatch_to_series(left, right, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(left.values.T, right.values).T
return self._constructor(new_data,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None):
assert isinstance(other, Series)
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
assert left.columns.equals(right.index)
return ops.dispatch_to_series(left, right, func, axis="columns")
def _combine_const(self, other, func, errors='raise'):
assert lib.is_scalar(other) or np.ndim(other) == 0
return ops.dispatch_to_series(self, other, func)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame based on a
passed function.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : boolean, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
result : DataFrame
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 NaN
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1],}, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1],}, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If col is not a column of self, the aligned series is all NaN;
# try to cast it to the other frame's dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. an all-NaN series cannot be cast to integer dtypes
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)
def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
combined : DataFrame
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
# TODO(DatetimelikeArray): just use .asi8
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
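# Masking sketch (hypothetical values): combiner keeps x where x is non-NA
# and falls back to y elsewhere, e.g. x=[NaN, 0] with y=[1, 1] -> [1, 0].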
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> boolean 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
raise_conflict : bool, default False
If True, will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
Raises
------
ValueError
When `raise_conflict` is True and there's overlapping non-NA data.
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For a Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
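# Mask-semantics sketch (hypothetical columns): with overwrite=True the mask
# preserves original values only where `other` is NA; with overwrite=False it
# preserves every original non-NA value.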
# ----------------------------------------------------------------------
# Data reshaping
_shared_docs['pivot'] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation; multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged:: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution('')
@Appender(_shared_docs['pivot'])
def pivot(self, index=None, columns=None, values=None):
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Returns
-------
table : DataFrame
See Also
--------
DataFrame.pivot : pivot without aggregation that can handle
    non-numeric data
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max   mean min
A C
bar large 5.500000 16 14.5 13
small 5.500000 15 14.5 14
foo large 2.000000 10 9.5 9
small 2.333333 12 11.0 8
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being re-organised from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
unstacked : DataFrame or Series
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See Also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self,
key, # type: Union[str, List[str]]
ndim, # type: int
subset=None # type: Union[Series, DataFrame, None]
):
# type: (...) -> Union[Series, DataFrame]
"""
Sub-classes to define; returns a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_doc = dedent("""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d,
axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
See also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = (super(DataFrame, self.T)
._aggregate(arg, *args, **kwargs))
result = result.T if result is not None else result
return result, how
return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
agg = aggregate
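# A minimal sketch (not pandas test code) of the transpose trick in
# _aggregate above: for axis=1 the frame is transposed, aggregated along
# axis=0, and the result transposed back, so the two calls below should
# agree.
# >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
# >>> df.agg('sum', axis=1)
# 0    4
# 1    6
# dtype: int64
# >>> df.T.agg('sum', axis=0)
# 0    4
# 1    6
# dtype: int64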
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
if axis == 1:
return super(DataFrame, self.T).transform(func, *args, **kwargs).T
return super(DataFrame, self).transform(func, *args, **kwargs)
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
broadcast : bool, optional
Only relevant for aggregation functions:
* ``False`` or ``None`` : returns a Series whose length is the
length of the index or the number of columns (based on the
`axis` parameter)
* ``True`` : results will be broadcast to the original shape
of the frame, the original index and columns will be retained.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
reduce : bool or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
`apply` will use `reduce` to determine whether the result
should be a Series or a DataFrame. If ``reduce=None`` (the
default), `apply`'s return value will be guessed by calling
`func` on an empty Series
(note: while guessing, exceptions raised by `func` will be
ignored).
If ``reduce=True`` a Series will always be returned, and if
``reduce=False`` a DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by ``result_type='reduce'``.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Notes
-----
In the current implementation apply calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transforming type operations
Examples
--------
>>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a DataFrame
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
Returns
-------
applied : Series or DataFrame
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
result_type=result_type,
args=args,
kwds=kwds)
return op.get_result()
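# Illustration of the double-call caveat in the Notes above (a hedged
# sketch, not a pandas doctest): side effects in `func` can fire twice for
# the first column because apply probes it to pick a code path.
# >>> calls = []
# >>> df = pd.DataFrame({'A': [1, 2, 3]})
# >>> _ = df.apply(lambda col: calls.append(1) or col.sum())
# >>> len(calls)  # may be 2 even though df has a single column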
def applymap(self, func):
"""
Apply a function to a DataFrame elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See also
--------
DataFrame.apply : Apply a function along input axis of DataFrame
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False,
verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following examples, while not recommended ways of generating
DataFrames, show two approaches to building a DataFrame from multiple
data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : name, tuple/list of names, or array-like
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Support for specifying index levels as the `on` parameter was added
in version 0.23.0
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
A key_caller B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
A key B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
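# Sketch of the two list-join paths above (hypothetical frames, not a
# pandas doctest): unique indexes take the single-concat branch, while
# duplicated indexes fall back to pairwise merges.
# >>> a = pd.DataFrame({'x': [1, 2]}, index=['i', 'j'])
# >>> b = pd.DataFrame({'y': [3, 4]}, index=['i', 'j'])
# >>> c = pd.DataFrame({'z': [5, 6]}, index=['i', 'j'])
# >>> a.join([b, c])  # all indexes unique -> one concat along axis=1
#    x  y  z
# i  1  3  5
# j  2  4  6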
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
Examples
--------
>>> import numpy as np
>>> histogram_intersection = lambda a, b: np.minimum(a, b
... ).sum().round(decimals=1)
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
elif method == 'kendall' or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError("method must be either 'pearson', "
"'spearman', or 'kendall', '{method}' "
"was supplied".format(method=method))
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
pandas.Series.cov : compute covariance with another Series
pandas.core.window.EWM.cov: exponential weighted sample covariance
pandas.core.window.Expanding.cov : expanding sample covariance
pandas.core.window.Rolling.cov : rolling sample covariance
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be
positive semi-definite. This could lead to estimated correlations
having absolute values greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/wiki/Estimation_of_covariance_matrices>`__
for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
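# A quick check of the caveat in the Notes (a sketch, not part of the
# pandas API): with pairwise deletion the estimate need not be positive
# semi-definite, which shows up as negative eigenvalues. `df` is assumed
# to be any DataFrame with missing data.
# >>> cov = df.cov(min_periods=12)
# >>> np.linalg.eigvalsh(cov.fillna(0).values).min()  # < 0 flags non-PSD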
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame, Series
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(other.corr, axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
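# Note on the masking trick above (explanatory comment): adding
# ``right * 0`` to ``left`` propagates NaNs from either frame into both,
# so each pairwise correlation only uses rows observed on both sides.
# For example, with left['a'] = [1.0, NaN, 3.0] and
# right['a'] = [2.0, 5.0, NaN]:
#     left + right * 0  -> [1.0, NaN, NaN]
#     right + left * 0  -> [2.0, NaN, NaN]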
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : boolean, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.shape: number of DataFrame rows and columns (including NA
elements)
DataFrame.isna: boolean same-sized DataFrame showing places of NA
elements
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical "
"{ax}.".format(ax=self._get_axis_name(axis)))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if axis is None and filter_type == 'bool':
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
if (filter_type == 'bool' and is_object_dtype(values) and
axis is None):
# work around https://github.com/numpy/numpy/issues/10489
# TODO: combine with hasattr(result, 'dtype') further down
# hard since we don't have `values` down there.
result = np.bool_(result)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
from pandas.core.apply import frame_apply
opa = frame_apply(self,
func=f,
result_type='expand',
ignore_failures=True)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except Exception:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError(
"Handling exception with filter_type {f} not"
"implemented.".format(f=filter_type))
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type {f}"
"not supported.".format(f=filter_type))
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
result = Series(result, index=labels)
return result
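# Summary of the fallback chain above (descriptive comment): with
# numeric_only=None the reduction is first attempted on the full ndarray;
# if that raises, a column-by-column frame_apply is tried, and finally the
# frame is restricted to numeric (or boolean) columns before reducing again.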
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explicit about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of wings
are 0 and 2. The second row of species and legs contains ``NaN``,
because each has only one mode, while the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
With ``dropna=False``, ``NaN`` values are considered and can be
the mode (as for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
With ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
numeric_only : boolean, default True
If False, the quantile of datetime and timedelta data will be
computed as well
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
See Also
--------
pandas.core.window.Rolling.quantile
numpy.percentile
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
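# Worked example of the default 'linear' interpolation (an illustration,
# not library code): for column a = [1, 2, 3, 4], the 0.1 quantile sits at
# position 0.1 * (4 - 1) = 0.3, i.e. between i = 1 and j = 2 with
# fraction 0.3, giving 1 + 0.3 * (2 - 1) = 1.3 -- matching the
# ``df.quantile(.1)`` output in the docstring above.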
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2],'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0},
docs={
'index': 'The index (row labels) of the DataFrame.',
'columns': 'The column labels of the DataFrame.'})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [ensure_index(columns), index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
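# Behaviour sketch for extract_index (hypothetical inputs, not a pandas
# doctest):
# >>> extract_index([Series([1, 2], index=['a', 'b'])])
# Index(['a', 'b'], dtype='object')
# >>> extract_index([np.zeros(2), np.zeros(3)])
# ValueError: arrays must all be same length
# >>> extract_index([0, 1])
# ValueError: If using all scalar values, you must pass an index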
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], compat.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_objs_combined_axis
if columns is None:
columns = _get_objs_combined_axis(data, sort=False)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = com.values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [d if type(d) is dict else dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = ibase.default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('{col:d} columns passed, passed data had '
'{con} columns'.format(col=len(columns),
con=len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = com.dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return u'{s}'.format(s=s)[:space].ljust(space)
| bsd-3-clause |
sfeeney/neuRRaLy | optimize_architecture.py | 1 | 32631 | import numpy as np
import numpy.random as npr
import astropy.stats as aps
import astropy.io.fits as apf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as mp
import subprocess as sp
import scipy.stats as ss
import scipy.signal as si
import matplotlib.cm as mpcm
import matplotlib.colors as mpc
import sklearn.neural_network as sk
import random as ra
######################################################################
def allocate_jobs(n_jobs, n_procs=1, rank=0):
n_j_allocated = 0
for i in range(n_procs):
n_j_remain = n_jobs - n_j_allocated
n_p_remain = n_procs - i
n_j_to_allocate = n_j_remain // n_p_remain  # integer division (Py2/Py3 safe)
if rank == i:
return range(n_j_allocated, \
n_j_allocated + n_j_to_allocate)
n_j_allocated += n_j_to_allocate
def allocate_jobs_inc_time(n_jobs, n_procs=1, rank=0):
allocated = []
for i in range(n_jobs):
if rank == np.mod(n_jobs-i, n_procs):
allocated.append(i)
return allocated
def complete_array(target_distrib, use_mpi=False):
if use_mpi:
target = np.zeros(target_distrib.shape)
mpi.COMM_WORLD.Reduce(target_distrib, target, op=mpi.SUM, \
root=0)
else:
target = target_distrib
return target
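# Minimal usage sketch for the helpers above (assumed pattern, mirroring
# how they are used later in this script): each rank fills only its
# allocated rows, then the partial arrays are summed onto rank 0.
# `expensive_computation` is a hypothetical stand-in.
# job_list = allocate_jobs(n_jobs, n_procs, rank)
# partial = np.zeros((n_jobs, n_bins))
# for j in job_list:
#     partial[j, :] = expensive_computation(j)
# full = complete_array(partial, use_mpi=use_mpi)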
######################################################################
# plotting settings
lw = 1.5
mp.rc('font', family = 'serif')
mp.rcParams['text.latex.preamble'] = [r'\boldmath']
mp.rcParams['axes.linewidth'] = lw
mp.rcParams['lines.linewidth'] = lw
cm = mpcm.get_cmap('plasma')
# useful constants
d2s = 24.0 * 3600.0
# settings
dataset = 'gloess' # 'gloess' or 'crts' or 'sim'
use_mpi = True
include_period = True
split_to_train = True
std_by_bin = False
test_training_length = False
n_rpt = 20
if dataset == 'gloess':
set_size = 4
elif dataset == 'crts':
set_size = 19 # 9
elif dataset == 'sim':
set_size = 250 # 9
base = dataset
if include_period:
base += '_inc_per'
if use_mpi:
import mpi4py.MPI as mpi
n_procs = mpi.COMM_WORLD.Get_size()
rank = mpi.COMM_WORLD.Get_rank()
else:
n_procs = 1
rank = 0
# switch on dataset
if dataset == 'gloess':
# dataset settings
# @TODO: might be able to push n_bins higher for this cadence
data_dir = 'data/gloess/'
n_bins = 50
# get training stars
cat_hdulist = apf.open(data_dir + 'gloess_cat.fit')
cols = cat_hdulist[1].columns
data = cat_hdulist[1].data
ids = data['Name']
fehs = data['__Fe_H_']
taus = data['FPer'] * d2s
types = data['RRL']
# check for correct type
rrab = (types == 'RRab')
ids = ids[rrab]
fehs = fehs[rrab]
taus = taus[rrab]
n_lc = len(ids)
# period distribution
tau_mean = np.mean(taus)
tau_std = np.std(taus)
# plot colours set by metallicities
feh_min = np.min(fehs)
feh_max = np.max(fehs)
feh_cols = (fehs - feh_min) / (feh_max - feh_min)
# read in lightcurves
cat_hdulist = apf.open(data_dir + 'gloess_lcs.fit')
cols = cat_hdulist[1].columns
data = cat_hdulist[1].data
binned_med_lcs = []
binned_mean_lcs = []
binned_mean_lc_stds = []
if rank == 0:
fig_sum, axes_sum = mp.subplots(1, 2, figsize=(16,5))
for i in range(n_lc):
# extract quantities of interest
inds = (data['Name'] == ids[i]) & (data['Flt'] == 'V')
phase = data['Phase'][inds]
mag = data['mag'][inds]
# calculate some binned statistics; no mag errors available
bins = np.linspace(0, 1, n_bins + 1)
meds, edges, i_bins = ss.binned_statistic(phase, \
mag - np.median(mag), \
statistic='median', \
bins=bins)
centers = (edges[0:-1] + edges[1:]) / 2.0
means = np.zeros(n_bins)
stds = np.zeros(n_bins)
for j in range(n_bins):
in_bin = (i_bins - 1 == j)
if in_bin.any():
means[j] = np.mean(mag[in_bin] - np.median(mag))
binned_med_lcs.append(meds)
binned_mean_lcs.append(means)
binned_mean_lc_stds.append(stds)
if rank == 0:
axes_sum[0].plot(centers, meds, color=cm(feh_cols[i]), alpha=0.4)
axes_sum[1].plot(centers, means, color=cm(feh_cols[i]), alpha=0.4)
elif dataset == 'crts':
# dataset settings
data_dir = 'data/crts_x_sdss/'
process_raw_lcs = False
n_bins = 25
threshold = 3.5
# get map between CRTS ID and ID number, along with peak time and
# period
css_id = []
css_id_num = []
css_period = []
css_peak = []
n_skip = 2
with open(data_dir + 'RRL_params') as f:
for i, l in enumerate(f):
if (i > n_skip - 1):
vals = [val for val in l.split()]
css_id.append(vals[0])
css_period.append(float(vals[4]))
css_peak.append(float(vals[9]))
css_id_num.append(vals[10])
# get training stars
hdulist = apf.open(data_dir + 'crts_bright_feh_info.fit')
cols = hdulist[1].columns
data = hdulist[1].data
ids = data['ID'][0]
fehs = data['FEH'][0]
taus = data['PER'][0]
mus = data['DM'][0]
# check for bad metallicities
bad_feh = (fehs < -3.0)
ids = ids[~bad_feh]
fehs = fehs[~bad_feh]
taus = taus[~bad_feh]
mus = mus[~bad_feh]
n_lc = len(ids)
# period distribution
tau_mean = np.mean(taus)
tau_std = np.std(taus)
# plot colours set by metallicities
feh_min = np.min(fehs)
feh_max = np.max(fehs)
feh_cols = (fehs - feh_min) / (feh_max - feh_min)
# loop through training set
binned_med_lcs = []
binned_mean_lcs = []
binned_mean_lc_stds = []
if rank == 0:
fig_sum, axes_sum = mp.subplots(1, 2, figsize=(16,5))
for i in range(n_lc):
# match IDs
        ind = next(j for j, v in enumerate(css_id) if v == 'CSS_' + ids[i])
# build lightcurves and save in simplified format, or read in
if process_raw_lcs:
# find matching entries in files
test = sp.Popen(['/usr/bin/grep ' + css_id_num[ind] + ' ' + \
data_dir + '/CSS_RR_phot/*phot'], \
shell=True, stdout=sp.PIPE)
output, err = test.communicate()
# parse lightcurves
time = []
mag = []
mag_err = []
lines = output.splitlines()
for line in lines:
vals = line.split(',')
time.append(float(vals[1]))
mag.append(float(vals[2]))
mag_err.append(float(vals[3]))
time = np.array(time)
mag = np.array(mag)
mag_err = np.array(mag_err)
# save to file
fname = data_dir + css_id[ind] + '_' + css_id_num[ind] + \
'_lc.txt'
np.savetxt(fname, \
np.column_stack((time, mag, mag_err)), \
fmt='%19.12e', header='time mag mag_err')
else:
# read lightcurves
fname = data_dir + css_id[ind] + '_' + css_id_num[ind] + \
'_lc.txt'
lc = np.genfromtxt(fname, names=True)
time = lc['time']
mag = lc['mag']
mag_err = lc['mag_err']
# what do the phase-wrapped lightcurves look like?
# 1007116003636; 0.5485033
period = taus[i]
phase = np.mod(time - css_peak[ind], period) / period
if False:
fig, axes = mp.subplots(1, 2, figsize=(16,5))
#nu = np.linspace(1.0, 3.0, 1000)
#power = aps.LombScargle(time, mag, mag_err).power(nu)
#nu, power = aps.LombScargle(time, mag, mag_err).autopower()
nu, power = aps.LombScargle(time, mag, mag_err).autopower(minimum_frequency=1.0, maximum_frequency=3.0)
print nu[np.argmax(power)]
axes[0].plot(phase, mag, '.', color=cm(feh_cols[i]), alpha=0.4)
axes[1].plot(nu, power, 'k-')
axes[1].axvline(1.0 / period, color='r', alpha=0.7, zorder=0)
mp.suptitle(css_id[ind] + ' / ' + css_id_num[ind])
#mp.show()
# calculate some binned statistics
bins = np.linspace(0, 1, n_bins + 1)
meds, edges, i_bins = ss.binned_statistic(phase, mag - np.median(mag), \
statistic='median', \
bins=bins)
centers = (edges[0:-1] + edges[1:]) / 2.0
means = np.zeros(n_bins)
stds = np.ones(n_bins) * 1e9
for j in range(n_bins):
in_bin = (i_bins - 1 == j)
if in_bin.any():
stds[j] = np.sqrt(1.0 / np.sum(mag_err[in_bin] ** -2))
means[j] = np.average(mag[in_bin] - np.median(mag), \
weights=mag_err[in_bin] ** -2)
binned_med_lcs.append(meds)
binned_mean_lcs.append(means)
binned_mean_lc_stds.append(stds)
if rank == 0:
axes_sum[0].plot(centers, meds, color=cm(feh_cols[i]), alpha=0.4)
axes_sum[1].plot(centers, means, color=cm(feh_cols[i]), alpha=0.4)
elif dataset == 'sim':
def set_phi_13(phi_1, phi_3):
phi_31 = 2.0 * np.pi + \
np.mod(phi_1 - 3.0 * phi_3, np.pi)
inds = phi_31 > 2.0 * np.pi
phi_31[inds] = np.pi + \
np.mod(phi_1[inds] - 3.0 * phi_3[inds], np.pi)
return phi_31
def set_feh(tau, phi_31):
return -5.038 - 5.394 * tau / 24.0 / 3600.0 + \
1.345 * phi_31
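    # Editor's note (hedged): set_phi_13 is called below as
    # set_phi_13(fcs[:, n_fc + 2], fcs[:, n_fc]), i.e. with (phi_3, phi_1), so
    # despite the argument names it evaluates phi_31 = phi_3 - 3 * phi_1
    # (modulo pi, shifted into a common quadrant). set_feh then applies a
    # Jurcsik & Kovacs (1996)-style photometric metallicity relation,
    #   [Fe/H] = -5.038 - 5.394 * P + 1.345 * phi_31,
    # with the period P in days (tau is stored in seconds, hence 24 * 3600).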
# settings
data_dir = 'data/asas/'
n_lc = 1000 #10000
n_fc = 3
n_samples = 1000
n_bins = 100
# stellar properties
tau_mean = 0.5 * d2s
tau_std = 0.1 * d2s
sigma_noise = 0.0 # 0.01
# stats from arXiv:0906.2199
raw_stats = np.genfromtxt(data_dir + 'fourier_decomp.txt')
stats = np.zeros((2 * n_fc, raw_stats.shape[0]))
for i in range(n_fc):
stats[i, :] = raw_stats[:, 1 + 2 * i]
stats[n_fc + i, :] = raw_stats[:, 2 * (i + 1)]
# some stars have negative amplitudes and small phases:
# shift so they're all in the same quadrant
weird_phase = stats[n_fc + i, :] < np.pi
stats[i, weird_phase] *= -1
stats[n_fc + i, weird_phase] += np.pi
#mp.plot(stats[i, :], stats[n_fc + i, :], '.')
#mp.plot(stats[i, weird_phase], stats[n_fc + i, weird_phase], 'r.')
#mp.show()
fc_mean = np.mean(stats, 1)
fc_cov = np.cov(stats)
# simulate fourier components, periods and metallicities
fcs = npr.multivariate_normal(fc_mean, fc_cov, n_lc)
taus = tau_mean + npr.randn(n_lc) * tau_std
phi_31s = set_phi_13(fcs[:, n_fc + 2], fcs[:, n_fc])
fehs = set_feh(taus, phi_31s)
if False:
mp.plot(phi_31s, fehs, '.')
phi_31_plot = np.linspace(np.min(phi_31s), np.max(phi_31s))
mp.plot(phi_31_plot, set_feh(np.mean(taus), phi_31_plot))
mp.xlabel(r'$\phi_{31}$')
mp.ylabel(r'${\rm [Fe/H]}$')
mp.xlim(np.min(phi_31s), np.max(phi_31s))
mp.ylim(np.min(fehs), np.max(fehs))
mp.show()
# plot colours set by metallicities
feh_min = np.min(fehs)
feh_max = np.max(fehs)
feh_cols = (fehs - feh_min) / (feh_max - feh_min)
# simulate binned lightcurves
binned_med_lcs = []
binned_mean_lcs = []
binned_mean_lc_stds = []
if rank == 0:
fig_sum, axes_sum = mp.subplots(1, 2, figsize=(16,5))
for i in range(n_lc):
# simulate lightcurves
phase = npr.rand(n_samples)
mag = npr.randn(n_samples) * sigma_noise
for j in range(n_fc):
mag += fcs[i, j] * np.sin(2.0 * np.pi * (j + 1) * phase + \
fcs[i, n_fc + j])
#mp.scatter(phase, mag)
#mp.show()
# calculate some binned statistics
bins = np.linspace(0, 1, n_bins + 1)
meds, edges, i_bins = ss.binned_statistic(phase, mag - np.median(mag), \
statistic='median', \
bins=bins)
centers = (edges[0:-1] + edges[1:]) / 2.0
means = np.zeros(n_bins)
stds = np.ones(n_bins) * 1e9
for j in range(n_bins):
in_bin = (i_bins - 1 == j)
if in_bin.any():
stds[j] = sigma_noise / np.sqrt(np.sum(in_bin))
means[j] = np.mean(mag[in_bin] - np.median(mag))
binned_med_lcs.append(meds)
binned_mean_lcs.append(means)
binned_mean_lc_stds.append(stds)
if n_lc < 1000 and rank == 0:
axes_sum[0].plot(centers, meds, color=cm(feh_cols[i]), alpha=0.4)
axes_sum[1].plot(centers, means, color=cm(feh_cols[i]), alpha=0.4)
# convert binned stats to useful dtype
binned_med_lcs = np.array(binned_med_lcs)
binned_mean_lcs = np.array(binned_mean_lcs)
binned_mean_lc_stds = np.array(binned_mean_lc_stds)
# summarize over stars to obtain median/mean lc shape
med_lc = np.zeros(n_bins)
mean_lc = np.zeros(n_bins)
std_lc = np.zeros(n_bins)
for i in range(n_bins):
med_lc[i] = np.nanmedian(binned_med_lcs[:, i])
mean_lc[i] = np.nanmean(binned_mean_lcs[:, i])
std_lc[i] = np.nanstd(binned_mean_lcs[:, i])
# check for outliers, taking into account intrinsic scatter in each
# bin
if dataset == 'crts':
is_out = []
for i in range(n_bins):
is_out.append(np.abs(mean_lc[i] - binned_mean_lcs[:, i]) / \
np.sqrt(std_lc[i] ** 2 + \
binned_mean_lc_stds[:, i] ** 2) > threshold)
out_count = np.sum(np.array(is_out), 0)
is_out = (out_count > 0)
for i in range(n_lc):
if out_count[i] > 0 and rank == 0:
print 'reject CSS_' + ids[i]
axes_sum[0].plot(centers, binned_med_lcs[i], 'k')
axes_sum[1].plot(centers, binned_mean_lcs[i], 'k')
else:
is_out = np.zeros(n_lc, dtype='bool')
# finish off overlays of lightcurves
if rank == 0:
if n_lc < 1000:
axes_sum[0].set_xlabel('phase')
axes_sum[0].set_ylabel('median mag')
axes_sum[0].set_ylim(-1.2, 0.6)
axes_sum[1].set_xlabel('phase')
axes_sum[1].set_ylabel('iv-weighted mean mag')
axes_sum[1].set_ylim(-1.2, 0.6)
fig_sum.savefig(dataset + '_mean_median_phase-wrapped_mags.pdf', \
bbox_inches='tight')
#mp.show()
mp.close()
# recalculate clean median and mean lightcurves
med_lc_clean = np.zeros(n_bins)
mean_lc_clean = np.zeros(n_bins)
std_lc_clean = np.zeros(n_bins)
n_lc_clean = np.zeros(n_bins)
for i in range(n_bins):
med_lc_clean[i] = np.nanmedian(binned_med_lcs[~is_out, i])
mean_lc_clean[i] = np.nanmean(binned_mean_lcs[~is_out, i])
std_lc_clean[i] = np.nanstd(binned_mean_lcs[~is_out, i])
n_lc_clean[i] = np.count_nonzero(~np.isnan(binned_mean_lcs[~is_out, i]))
# plot median and mean lightcurves, with and without outliers
if rank == 0:
fig_stats, axes_stats = mp.subplots(1, 2, figsize=(16,5))
axes_stats[0].plot(centers, med_lc, 'k-', label='median')
axes_stats[0].plot(centers, med_lc_clean, 'r--', label='median (clean)')
axes_stats[1].plot(centers, mean_lc, 'k-', label='mean')
axes_stats[1].plot(centers, mean_lc_clean, 'r--', label='mean (clean)')
axes_stats[0].set_xlabel('phase')
axes_stats[0].set_ylabel('median mag')
axes_stats[0].set_ylim(-0.8, 0.3)
axes_stats[0].legend(loc='upper left')
axes_stats[1].set_xlabel('phase')
axes_stats[1].set_ylabel('mean mag')
axes_stats[1].set_ylim(-0.8, 0.3)
axes_stats[1].legend(loc='upper left')
mp.savefig(dataset + '_mean_median_lightcurves.pdf', \
bbox_inches='tight')
mp.close()
#mp.show()
# divide through median/mean lightcurve, ditching outliers
if n_lc < 1000 and rank == 0:
feh_sort = np.argsort(fehs)
fig, axes = mp.subplots(1, 2, figsize=(16,5))
for j in range(n_lc):
i = feh_sort[j]
if not is_out[i]:
axes[0].plot(centers, (binned_med_lcs[i, :] - med_lc), \
color=cm(feh_cols[i]), alpha=0.4)
axes[1].plot(centers, (binned_mean_lcs[i, :] - mean_lc), \
color=cm(feh_cols[i]), alpha=0.4)
axes[0].set_xlabel('phase')
axes[0].set_ylabel('mag / med(mag)')
axes[1].set_xlabel('phase')
axes[1].set_ylabel('mag / mean(mag)')
fig.savefig(dataset + '_mean_median_phase-wrapped_scaled_mags.pdf', \
bbox_inches='tight')
mp.close()
#mp.show()
# test out metallicity dependence in bins
n_bins_feh = 3
feh_min = [-10.0, -1.7, -1.1]
feh_max = [-1.7, -1.1, 10.0]
cols = [cm(0.2), cm(0.5), cm(0.8)]
if rank == 0:
fig, axes = mp.subplots(2, 2, figsize=(16,10))
med_lc_all_feh = med_lc_clean
mean_lc_all_feh = mean_lc_clean
n_lc_all_feh = n_lc_clean
for k in range(n_bins_feh):
# summarize over stars to obtain median/mean lc shape
feh_inds = (fehs >= feh_min[k]) & (fehs < feh_max[k]) & \
~is_out
if rank == 0:
to_fmt = 'bin {:d} has {:5.2f} < Fe/H < {:5.2f}'
print to_fmt.format(k, np.min(fehs[feh_inds]), \
np.max(fehs[feh_inds]))
med_lc = np.zeros(n_bins)
mean_lc = np.zeros(n_bins)
n_lc_bin = np.zeros(n_bins)
for i in range(n_bins):
med_lc[i] = np.nanmedian(binned_med_lcs[feh_inds, i])
mean_lc[i] = np.nanmean(binned_mean_lcs[feh_inds, i])
n_lc_bin[i] = np.count_nonzero(~np.isnan(binned_mean_lcs[feh_inds, i]))
if rank == 0:
label = '${:4.1f} '.format(feh_min[k]) + r'\leq' + \
' [Fe/H] < {:4.1f}$'.format(feh_max[k])
#axes[0, 0].plot(centers, med_lc, color=cols[k], label=label)
#axes[0, 1].plot(centers, mean_lc, color=cols[k], label=label)
#axes[1, 0].plot(centers, med_lc - med_lc_all_feh, color=cols[k], label=label)
#axes[1, 1].plot(centers, mean_lc - mean_lc_all_feh, color=cols[k], label=label)
axes[0, 0].errorbar(centers, med_lc, std_lc_clean / np.sqrt(n_lc), color=cols[k], label=label)
axes[0, 1].errorbar(centers, mean_lc, std_lc_clean / np.sqrt(n_lc), color=cols[k], label=label)
axes[1, 0].errorbar(centers, med_lc - med_lc_all_feh, std_lc_clean * np.sqrt(1.0 / n_lc + 1.0 / n_lc_bin), color=cols[k], label=label)
axes[1, 1].errorbar(centers, mean_lc - mean_lc_all_feh, std_lc_clean * np.sqrt(1.0 / n_lc + 1.0 / n_lc_bin), color=cols[k], label=label)
if rank == 0:
axes[0, 0].set_xlabel('phase')
axes[0, 0].set_ylabel('median mag')
axes[0, 0].set_ylim(-0.8, 0.3)
axes[0, 0].legend(loc='upper left', fontsize=12)
axes[0, 1].set_xlabel('phase')
axes[0, 1].set_ylabel('mean mag')
axes[0, 1].set_ylim(-0.8, 0.3)
axes[0, 1].legend(loc='upper left', fontsize=12)
axes[1, 0].set_xlabel('phase')
axes[1, 0].set_ylabel('median mag - median shape')
axes[1, 0].set_ylim(-0.08, 0.08)
axes[1, 0].legend(loc='upper center', fontsize=12)
axes[1, 1].set_xlabel('phase')
axes[1, 1].set_ylabel('mean mag - mean shape')
axes[1, 1].set_ylim(-0.08, 0.08)
axes[1, 1].legend(loc='upper center', fontsize=12)
mp.savefig(dataset + '_mean_median_lightcurves_feh_dep.pdf', \
bbox_inches='tight')
mp.close()
#mp.show()
'''
blah = np.zeros(n_bins * n_lc)
for i in range(n_lc):
blah[i * n_bins: (i + 1) * n_bins] = binned_mean_lcs[i, :]# - mean_lc
mp.hist(blah, bins = 20)
mp.show()
exit()
'''
# should we split into training and test sets or not? count only the stars
# that survive outlier rejection here, so the committee masks built below
# line up with the cleaned nn_inputs defined afterwards
n_lc = np.sum(~is_out)
if split_to_train:
    # split into training and test sets: the splits are as equal as
    # possible, with the final set absorbing any remainder
n_split = int(np.floor(n_lc / float(set_size)))
if rank == 0:
print 'splitting into committee of {:d} nets'.format(n_split)
set_ids = range(n_lc)
ra.shuffle(set_ids)
if use_mpi:
set_ids = mpi.COMM_WORLD.bcast(set_ids, root=0)
test_ids = np.zeros((n_lc, n_split), dtype=bool)
test_ids[:, 0] = [i < set_size for i in set_ids]
for j in range(1, n_split - 1):
test_ids[:, j] = [i < set_size * (j + 1) and \
i >= set_size * j for i in set_ids]
test_ids[:, -1] = [i >= set_size * (n_split - 1) for i in set_ids]
for j in range(n_split):
fmt_str = 'committee {:d}: {:d} training, {:d} testing'
if rank == 0:
print fmt_str.format(j + 1, np.sum(~test_ids[:, j]), \
np.sum(test_ids[:, j]))
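# Editor's illustration (hedged): with, say, 100 surviving stars and
# set_size = 4 this builds n_split = 25 committee members; member j is tested
# on the 4 stars whose shuffled ids fall in [4j, 4(j+1)) and trained on the
# other 96, so every star is predicted exactly once out of sample.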
# define neural network inputs
#nn_inputs = binned_mean_lcs - mean_lc
nn_inputs = binned_mean_lcs[~is_out, :] - mean_lc
feh_mean = np.mean(fehs[~is_out])
feh_std = np.std(fehs[~is_out])
nn_outputs = (fehs[~is_out] - feh_mean) / feh_std
if std_by_bin:
for i in range(n_bins):
avg_nn_inputs = np.mean(nn_inputs[:, i])
std_nn_inputs = np.std(nn_inputs[:, i])
nn_inputs[:, i] = (nn_inputs[:, i] - avg_nn_inputs) / std_nn_inputs
else:
avg_nn_inputs = np.mean(nn_inputs.flatten())
std_nn_inputs = np.std(nn_inputs.flatten())
nn_inputs = (nn_inputs - avg_nn_inputs) / std_nn_inputs
if include_period:
nn_inputs = np.append(nn_inputs, \
(taus[~is_out, None] - tau_mean) / tau_std, 1)
# dependence on training length
if test_training_length:
#max_its = np.array([3, 10, 30, 100, 300, 1000, 3000, 10000])
#max_its = np.array([3, 10, 30, 100, 300, 1000, 3000])
#max_its = np.array([25, 50, 100, 200, 300, 400, 500])
max_its = np.array([100, 300, 1000, 3000])
n_max_its = len(max_its)
dcol = 1.0 / float(n_max_its - 1)
seeds = np.random.randint(102314, 221216, n_rpt)
if use_mpi:
mpi.COMM_WORLD.Bcast(seeds, root=0)
feh_pred = np.zeros((n_lc, n_max_its, n_rpt))
chisq = np.zeros(n_max_its)
chisq_core = np.zeros(n_max_its)
feh_pred_loc = np.zeros((n_lc, n_max_its, n_rpt))
chisq_loc = np.zeros(n_max_its)
chisq_core_loc = np.zeros(n_max_its)
n_err_bins = 100
err_bins = np.linspace(-5.0, 5.0, n_err_bins + 1)
err_bin_mids = (err_bins[1:] + err_bins[:-1]) / 2.0
binned_errs = np.zeros((n_err_bins, n_max_its))
binned_errs_loc = np.zeros((n_err_bins, n_max_its))
job_list = allocate_jobs_inc_time(n_max_its, n_procs, rank)
print 'process id {:d}: jobs'.format(rank), job_list
#for i in range(n_max_its):
for i in job_list:
n_lc_core = 0
for j in range(n_rpt):
nn = sk.MLPRegressor(hidden_layer_sizes=(20,), \
activation='logistic', solver='lbfgs', \
alpha=0.1, batch_size='auto', \
learning_rate='constant', \
learning_rate_init=0.001, power_t=0.5, \
max_iter=max_its[i], shuffle=True, \
random_state=seeds[j], tol=0.000, \
verbose=False, warm_start=False, \
momentum=0.9, nesterovs_momentum=True, \
early_stopping=False, validation_fraction=0.1, \
beta_1=0.9, beta_2=0.999, epsilon=1e-08)
for k in range(n_split):
nn.fit(nn_inputs[~test_ids[:, k], :], \
nn_outputs[~test_ids[:, k]])
feh_pred_loc[test_ids[:, k], i, j] = \
nn.predict(nn_inputs[test_ids[:, k], :])
res = feh_pred_loc[:, i, j] - nn_outputs
res_rej = np.abs(res) > 1.0
chisq_loc[i] += np.sum((res) ** 2)
chisq_core_loc[i] += np.sum((res[~res_rej]) ** 2)
n_lc_core += np.sum(~res_rej)
binned_errs_loc[:, i] += np.histogram(res, bins=err_bins)[0]
chisq_core_loc[i] /= n_lc_core
print n_lc_core, n_lc
print 'n_max_its step {:d} of {:d} complete'.format(i + 1, \
n_max_its)
chisq_loc /= n_rpt * n_lc
if use_mpi:
mpi.COMM_WORLD.barrier()
chisq = complete_array(chisq_loc, use_mpi)
chisq_core = complete_array(chisq_core_loc, use_mpi)
feh_pred = complete_array(feh_pred_loc, use_mpi)
binned_errs = complete_array(binned_errs_loc, use_mpi)
# find optimum training length
opt_ind = np.argmin(chisq)
opt_ind_core = np.argmin(chisq_core)
if rank == 0:
print 'optimum chisq {:f} at {:d}'.format(chisq[opt_ind], \
max_its[opt_ind])
print 'or {:f} at {:d} w/out failures'.format(chisq_core[opt_ind_core], \
max_its[opt_ind_core])
# plot on main process only
if rank == 0:
# plot best performing network
plot_min = -3.0 * feh_std + feh_mean
plot_max = 3.0 * feh_std + feh_mean
mp.plot([plot_min, plot_max], [plot_min, plot_max], 'k')
mp.scatter(nn_outputs * feh_std + feh_mean, \
np.mean(feh_pred[:, opt_ind_core, :], -1) * \
feh_std + feh_mean)
mp.xlabel(r'$[Fe/H]_{\rm true}$')
mp.ylabel(r'$\langle[Fe/H]_{\rm pred}\rangle$')
mp.xlim(plot_min, plot_max)
mp.ylim(plot_min, plot_max)
mp.savefig(base + '_opt_its_predictions.pdf', \
bbox_inches='tight')
mp.close()
# plot chi_sq as function of max_its
mp.semilogx(max_its, chisq, label='all predictions')
mp.semilogx(max_its, chisq_core, label='failures removed')
mp.xlabel(r'${\rm n_{its}}$')
mp.ylabel(r'$\chi^2/{\rm DOF}$')
mp.xlim(np.min(max_its), np.max(max_its))
mp.legend(loc='upper right')
mp.savefig(base + '_max_its_performance.pdf', \
bbox_inches='tight')
mp.close()
# plot residuals distribution as function of max_its
res_max = 0.0
for i in range(n_max_its):
res_max_temp = np.max(np.abs(err_bin_mids[binned_errs[:, i] > 0]))
if res_max_temp > res_max:
res_max = res_max_temp
n_binned_max = np.max(binned_errs)
fig, axes = mp.subplots(2, 2, figsize=(16, 5), sharex=True, sharey=True)
print res_max
for i in range(n_max_its):
            i_row = i // 2
i_col = np.mod(i, 2)
axes[i_row, i_col].step(err_bin_mids, binned_errs[:, i])
axes[i_row, i_col].text(-0.95 * res_max, 0.9 * n_binned_max, \
'{:d} iterations'.format(max_its[i]))
axes[i_row, i_col].set_xlim(-res_max, res_max)
if i_row == 1:
axes[i_row, i_col].set_xlabel(r'$\Delta[Fe/H]$')
if i_col == 0:
axes[i_row, i_col].set_ylabel(r'$N(\Delta[Fe/H])$')
fig.subplots_adjust(wspace=0, hspace=0)
mp.savefig(base + '_max_its_residuals.pdf', \
bbox_inches='tight')
if use_mpi:
mpi.Finalize()
exit()
# dependence on alpha and n_hidden
n_grid_hid = 10
n_grid_alpha = 25
dcol_hid = 1.0 / float(n_grid_hid - 1)
dcol_alpha = 1.0 / float(n_grid_alpha - 1)
#n_hidden = np.linspace(1, 10, n_grid_hid, dtype=int) * 100
n_hidden = np.linspace(1, 10, n_grid_hid, dtype=int) * 4
#alpha = np.logspace(-7, 0, n_grid_alpha)
#alpha = np.logspace(-4, 0, n_grid_alpha)
alpha = np.logspace(-6, 0, n_grid_alpha)
seeds = np.random.randint(102314, 221216, n_rpt)
if use_mpi:
mpi.COMM_WORLD.Bcast(seeds, root=0)
chisq = np.zeros((n_grid_hid, n_grid_alpha))
chisq_loc = np.zeros((n_grid_hid, n_grid_alpha))
feh_pred = np.zeros((n_lc, n_grid_hid, n_grid_alpha, n_rpt))
feh_pred_loc = np.zeros((n_lc, n_grid_hid, n_grid_alpha, n_rpt))
job_list = allocate_jobs(n_grid_alpha, n_procs, rank)
print 'process id {:d}: jobs'.format(rank), job_list
for i in range(n_grid_hid):
for j in job_list:
print 'n_hidden gridpoint {:d},'.format(i + 1), \
'alpha gridpoint {:d}'.format(j + 1)
for k in range(n_rpt):
#activation='tanh', solver='lbfgs', \
nn = sk.MLPRegressor(hidden_layer_sizes=(n_hidden[i],), \
activation='logistic', solver='lbfgs', \
alpha=alpha[j], batch_size='auto', \
learning_rate='constant', \
learning_rate_init=0.001, power_t=0.5, \
max_iter=100, shuffle=True, \
random_state=seeds[k], \
tol=0.0, verbose=False, warm_start=False, \
momentum=0.9, nesterovs_momentum=True, \
early_stopping=False, validation_fraction=0.1, \
beta_1=0.9, beta_2=0.999, epsilon=1e-08)
if split_to_train:
for m in range(n_split):
nn.fit(nn_inputs[~test_ids[:, m], :], \
nn_outputs[~test_ids[:, m]])
feh_pred_loc[test_ids[:, m], i, j, k] = \
nn.predict(nn_inputs[test_ids[:, m], :])
else:
nn.fit(nn_inputs, nn_outputs)
feh_pred_loc[:, i, j, k] = nn.predict(nn_inputs)
res = feh_pred_loc[:, i, j, k] - nn_outputs
chisq_loc[i, j] += np.sum(res ** 2)
chisq_loc /= n_rpt * n_lc
if use_mpi:
mpi.COMM_WORLD.barrier()
chisq = complete_array(chisq_loc, use_mpi)
feh_pred = complete_array(feh_pred_loc, use_mpi)
# find optimum n_hidden and alpha
opt_ind = np.unravel_index(np.argmin(chisq), (n_grid_hid, n_grid_alpha))
if rank == 0:
print 'optimum chisq {:f} at {:d}, {:e}'.format(chisq[opt_ind], \
n_hidden[opt_ind[0]], \
alpha[opt_ind[1]])
# save results to file! but what else to save?
#output_file = open(base + '_opt_alpha_n_hidden_predictions.dat', 'wb')
#feh_pred.tofile(output_file)
#output_file.close()
# no point duplicating plots
if rank == 0:
# plot best performing network
plot_min = -3.0 * feh_std + feh_mean
plot_max = 3.0 * feh_std + feh_mean
mp.plot([plot_min, plot_max], [plot_min, plot_max], 'k')
mp.scatter(nn_outputs * feh_std + feh_mean, \
np.mean(feh_pred[:, opt_ind[0], opt_ind[1], :], -1) * \
feh_std + feh_mean)
mp.xlabel(r'$[Fe/H]_{\rm true}$')
mp.ylabel(r'$\langle[Fe/H]_{\rm pred}\rangle$')
mp.xlim(plot_min, plot_max)
mp.ylim(plot_min, plot_max)
mp.savefig(base + '_opt_alpha_n_hidden_predictions.pdf', \
bbox_inches='tight')
mp.close()
# summary plots
fig, axes = mp.subplots(1, 2, figsize=(16, 5))
for i in range(n_grid_hid):
axes[0].semilogx(alpha, chisq[i, :], color=cm(dcol_hid * i))
for i in range(n_grid_alpha):
axes[1].plot(n_hidden, chisq[:, i], color=cm(dcol_alpha * i))
axes[0].set_xlabel(r'$\alpha$')
axes[0].set_ylabel(r'$\sum (Z_{\rm pred} - Z_{\rm true})^2$')
axes[1].set_xlabel(r'$n_{\rm hidden}$')
axes[1].set_ylabel(r'$\sum (Z_{\rm pred} - Z_{\rm true})^2$')
mp.savefig(base + '_alpha_n_hidden_1d_performance.pdf', \
bbox_inches='tight')
mp.close()
# 2D plot
fig = mp.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(chisq, cmap = mpcm.plasma, interpolation = 'nearest')
mp.colorbar(cax)
xticks = ax.get_xticks()
yticks = ax.get_yticks()
ax.set_xticklabels([''] + ['{:6.1e}'.format(alpha[int(x)]) \
for x in xticks[1:-1]])
ax.set_yticklabels([''] + ['{:d}'.format(n_hidden[int(y)]) \
for y in yticks[1:-1]])
ax.set_xlabel(r'$\alpha$')
ax.xaxis.set_label_position('top')
ax.set_ylabel(r'$n_{\rm hidden}$')
mp.savefig(base + '_alpha_n_hidden_performance.pdf', \
bbox_inches='tight')
mp.close()
# @TODO: SAVE TO FILE (networks, eventually)
# @TODO: redo alpha n_hidden with 1000 iterations
# - then turn on tolerance to see if results same w/ speedup
# @TODO: or loop over max its
# @TODO: increase n_repeat
if use_mpi:
mpi.Finalize()
| mit |
toobaz/pandas | pandas/io/html.py | 1 | 34063 | """:mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
from collections import abc
import numbers
import os
import re
from pandas.compat import raise_with_traceback
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError, EmptyDataError
from pandas.core.dtypes.common import is_list_like
from pandas import Series
from pandas.io.common import _is_url, _validate_header_arg, urlopen
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def _importers():
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
bs4 = import_optional_dependency("bs4", raise_on_missing=False, on_version="ignore")
_HAS_BS4 = bs4 is not None
lxml = import_optional_dependency(
"lxml.etree", raise_on_missing=False, on_version="ignore"
)
_HAS_LXML = lxml is not None
html5lib = import_optional_dependency(
"html5lib", raise_on_missing=False, on_version="ignore"
)
_HAS_HTML5LIB = html5lib is not None
_IMPORTS = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}")
def _remove_whitespace(s, regex=_RE_WHITESPACE):
"""Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : regex
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(" ", s.strip())
def _get_skiprows(skiprows):
"""Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
start, step = skiprows.start or 0, skiprows.step or 1
return list(range(start, skiprows.stop, step))
elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
return skiprows
elif skiprows is None:
return 0
raise TypeError(
"%r is not a valid type for skipping rows" % type(skiprows).__name__
)
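# Editor's example (hedged; not part of pandas): _get_skiprows(slice(2, 10, 2))
# returns [2, 4, 6, 8]; integers and list-likes pass through unchanged, and
# None becomes 0 so nothing is skipped.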
def _read(obj):
"""Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_text : str
"""
if _is_url(obj):
with urlopen(obj) as url:
text = url.read()
elif hasattr(obj, "read"):
text = obj.read()
elif isinstance(obj, (str, bytes)):
text = obj
try:
if os.path.isfile(text):
with open(text, "rb") as f:
return f.read()
except (TypeError, ValueError):
pass
else:
raise TypeError("Cannot read object of type %r" % type(obj).__name__)
return text
class _HtmlFrameParser:
"""Base class for parsers that parse HTML into DataFrames.
Parameters
----------
io : str or file-like
This can be either a string of raw HTML, a valid URL using the HTTP,
FTP, or FILE protocols or a file-like object.
match : str or regex
The text to match in the document.
attrs : dict
List of HTML <table> element attributes to match.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
.. versionadded:: 0.23.0
Attributes
----------
io : str or file-like
raw HTML, URL, or file-like object
match : regex
The text to match in the raw HTML
attrs : dict-like
A dictionary of valid table attributes to use to search for table
elements.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
.. versionadded:: 0.23.0
Notes
-----
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_attr_getter`
* :func:`_text_getter`
* :func:`_parse_td`
* :func:`_parse_thead_tr`
* :func:`_parse_tbody_tr`
* :func:`_parse_tfoot_tr`
* :func:`_parse_tables`
* :func:`_equals_tag`
See each method's respective documentation for details on their
functionality.
"""
def __init__(self, io, match, attrs, encoding, displayed_only):
self.io = io
self.match = match
self.attrs = attrs
self.encoding = encoding
self.displayed_only = displayed_only
def parse_tables(self):
"""
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from tables.
"""
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
return (self._parse_thead_tbody_tfoot(table) for table in tables)
def _attr_getter(self, obj, attr):
"""
Return the attribute value of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
attr : str or unicode
The attribute, such as "colspan"
Returns
-------
str or unicode
The attribute value.
"""
# Both lxml and BeautifulSoup have the same implementation:
return obj.get(attr)
def _text_getter(self, obj):
"""
Return the text of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
text : str or unicode
The text from an individual DOM node.
"""
raise AbstractMethodError(self)
def _parse_td(self, obj):
"""Return the td elements from a row element.
Parameters
----------
obj : node-like
A DOM <tr> node.
Returns
-------
list of node-like
These are the elements of each row, i.e., the columns.
"""
raise AbstractMethodError(self)
def _parse_thead_tr(self, table):
"""
Return the list of thead row elements from the parsed table element.
Parameters
----------
table : a table element that contains zero or more thead elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tbody_tr(self, table):
"""
Return the list of tbody row elements from the parsed table element.
HTML5 table bodies consist of either 0 or more <tbody> elements (which
only contain <tr> elements) or 0 or more <tr> elements. This method
checks for both structures.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tfoot_tr(self, table):
"""
Return the list of tfoot row elements from the parsed table element.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tables(self, doc, match, attrs):
"""
Return all tables from the parsed DOM.
Parameters
----------
doc : the DOM from which to parse the table element.
match : str or regular expression
The text to search for in the DOM tree.
attrs : dict
A dictionary of table attributes that can be used to disambiguate
multiple tables on a page.
Raises
------
ValueError : `match` does not match any text in the document.
Returns
-------
list of node-like
HTML <table> elements to be parsed into raw data.
"""
raise AbstractMethodError(self)
def _equals_tag(self, obj, tag):
"""
Return whether an individual DOM node matches a tag
Parameters
----------
obj : node-like
A DOM node.
tag : str
Tag name to be checked for equality.
Returns
-------
boolean
Whether `obj`'s tag name is `tag`
"""
raise AbstractMethodError(self)
def _build_doc(self):
"""
Return a tree-like object that can be used to iterate over the DOM.
Returns
-------
node-like
The DOM from which to parse the table element.
"""
raise AbstractMethodError(self)
def _parse_thead_tbody_tfoot(self, table_html):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
"""
header_rows = self._parse_thead_tr(table_html)
body_rows = self._parse_tbody_tr(table_html)
footer_rows = self._parse_tfoot_tr(table_html)
def row_is_all_th(row):
return all(self._equals_tag(t, "th") for t in self._parse_td(row))
if not header_rows:
# The table has no <thead>. Move the top all-<th> rows from
# body_rows to header_rows. (This is a common case because many
            # tables in the wild have no <thead> or <tfoot>.)
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
header = self._expand_colspan_rowspan(header_rows)
body = self._expand_colspan_rowspan(body_rows)
footer = self._expand_colspan_rowspan(footer_rows)
return header, body, footer
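    # Editor's illustration (hedged): for a table with no <thead> whose first
    # row contains only <th> cells, e.g.
    #   <tr><th>A</th><th>B</th></tr><tr><td>1</td><td>2</td></tr>
    # that row is promoted, giving header=[['A', 'B']], body=[['1', '2']],
    # footer=[].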
def _expand_colspan_rowspan(self, rows):
"""
Given a list of <tr>s, return a list of text rows.
Parameters
----------
rows : list of node-like
List of <tr>s
Returns
-------
list of list
Each returned row is a list of str text.
Notes
-----
Any cell with ``rowspan`` or ``colspan`` will have its contents copied
to subsequent cells.
"""
all_texts = [] # list of rows, each a list of str
remainder = [] # list of (index, text, nrows)
for tr in rows:
texts = [] # the output for this row
next_remainder = []
index = 0
tds = self._parse_td(tr)
for td in tds:
# Append texts from previous rows with rowspan>1 that come
# before this <td>
while remainder and remainder[0][0] <= index:
prev_i, prev_text, prev_rowspan = remainder.pop(0)
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
index += 1
# Append the text from this <td>, colspan times
text = _remove_whitespace(self._text_getter(td))
rowspan = int(self._attr_getter(td, "rowspan") or 1)
colspan = int(self._attr_getter(td, "colspan") or 1)
for _ in range(colspan):
texts.append(text)
if rowspan > 1:
next_remainder.append((index, text, rowspan - 1))
index += 1
# Append texts from previous rows at the final position
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
# Append rows that only appear because the previous row had non-1
# rowspan
while remainder:
next_remainder = []
texts = []
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
return all_texts
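    # Editor's illustration (hedged): given
    #   <tr><td rowspan="2">a</td><td>b</td></tr>
    #   <tr><td>c</td></tr>
    # the method returns [['a', 'b'], ['a', 'c']]; the rowspan cell is carried
    # into the next row through the `remainder` bookkeeping above.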
def _handle_hidden_tables(self, tbl_list, attr_name):
"""
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
"""
if not self.displayed_only:
return tbl_list
return [
x
for x in tbl_list
if "display:none"
not in getattr(x, attr_name).get("style", "").replace(" ", "")
]
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
--------
pandas.io.html._HtmlFrameParser
pandas.io.html._LxmlFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from bs4 import SoupStrainer
self._strainer = SoupStrainer("table")
def _parse_tables(self, doc, match, attrs):
element_name = self._strainer.name
tables = doc.find_all(element_name, attrs=attrs)
if not tables:
raise ValueError("No tables found")
result = []
unique_tables = set()
tables = self._handle_hidden_tables(tables, "attrs")
for table in tables:
if self.displayed_only:
for elem in table.find_all(style=re.compile(r"display:\s*none")):
elem.decompose()
if table not in unique_tables and table.find(text=match) is not None:
result.append(table)
unique_tables.add(table)
if not result:
raise ValueError(
"No tables found matching pattern {patt!r}".format(patt=match.pattern)
)
return result
def _text_getter(self, obj):
return obj.text
def _equals_tag(self, obj, tag):
return obj.name == tag
def _parse_td(self, row):
return row.find_all(("td", "th"), recursive=False)
def _parse_thead_tr(self, table):
return table.select("thead tr")
def _parse_tbody_tr(self, table):
from_tbody = table.select("tbody tr")
from_root = table.find_all("tr", recursive=False)
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.select("tfoot tr")
def _setup_build_doc(self):
raw_text = _read(self.io)
if not raw_text:
raise ValueError("No text parsed from document: {doc}".format(doc=self.io))
return raw_text
def _build_doc(self):
from bs4 import BeautifulSoup
return BeautifulSoup(
self._setup_build_doc(), features="html5lib", from_encoding=self.encoding
)
def _build_xpath_expr(attrs):
"""Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression that checks for the given HTML attributes.
"""
# give class attribute as class_ because class is a python keyword
if "class_" in attrs:
attrs["class"] = attrs.pop("class_")
s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()]
return "[{expr}]".format(expr=" and ".join(s))
_re_namespace = {"re": "http://exslt.org/regular-expressions"}
_valid_schemes = "http", "file", "ftp"
class _LxmlFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses lxml under the hood.
Warning
-------
This parser can only handle HTTP, FTP, and FILE urls.
See Also
--------
_HtmlFrameParser
    _BeautifulSoupHtml5LibFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`_HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _text_getter(self, obj):
return obj.text_content()
def _parse_td(self, row):
# Look for direct children only: the "row" element here may be a
# <thead> or <tfoot> (see _parse_thead_tr).
return row.xpath("./td|./th")
def _parse_tables(self, doc, match, kwargs):
pattern = match.pattern
# 1. check all descendants for the given pattern and only search tables
# 2. go up the tree until we find a table
query = "//table//*[re:test(text(), {patt!r})]/ancestor::table"
xpath_expr = query.format(patt=pattern)
# if any table attributes were given build an xpath expression to
# search for them
if kwargs:
xpath_expr += _build_xpath_expr(kwargs)
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)
tables = self._handle_hidden_tables(tables, "attrib")
if self.displayed_only:
for table in tables:
# lxml utilizes XPATH 1.0 which does not have regex
# support. As a result, we find all elements with a style
# attribute and iterate them to check for display:none
for elem in table.xpath(".//*[@style]"):
if "display:none" in elem.attrib.get("style", "").replace(" ", ""):
elem.getparent().remove(elem)
if not tables:
raise ValueError(
"No tables found matching regex {patt!r}".format(patt=pattern)
)
return tables
def _equals_tag(self, obj, tag):
return obj.tag == tag
def _build_doc(self):
"""
Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
parser = HTMLParser(recover=True, encoding=self.encoding)
try:
if _is_url(self.io):
with urlopen(self.io) as f:
r = parse(f, parser=parser)
else:
# try to parse the input in the simplest way
r = parse(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
except (UnicodeDecodeError, IOError) as e:
# if the input is a blob of html goop
if not _is_url(self.io):
r = fromstring(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
else:
raise e
else:
if not hasattr(r, "text_content"):
raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
return r
def _parse_thead_tr(self, table):
rows = []
for thead in table.xpath(".//thead"):
rows.extend(thead.xpath("./tr"))
# HACK: lxml does not clean up the clearly-erroneous
# <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). Add
# the <thead> and _pretend_ it's a <tr>; _parse_td() will find its
# children as though it's a <tr>.
#
# Better solution would be to use html5lib.
elements_at_root = thead.xpath("./td|./th")
if elements_at_root:
rows.append(thead)
return rows
def _parse_tbody_tr(self, table):
from_tbody = table.xpath(".//tbody//tr")
from_root = table.xpath("./tr")
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.xpath(".//tfoot//tr")
def _expand_elements(body):
lens = Series([len(elem) for elem in body])
lens_max = lens.max()
not_max = lens[lens != lens_max]
empty = [""]
for ind, length in not_max.items():
body[ind] += empty * (lens_max - length)
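# Editor's example (hedged): _expand_elements mutates `body` in place, so
#   body = [['a', 'b', 'c'], ['d']]
# becomes [['a', 'b', 'c'], ['d', '', '']] after the call.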
def _data_to_frame(**kwargs):
head, body, foot = kwargs.pop("data")
header = kwargs.pop("header")
kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"])
if head:
body = head + body
# Infer header when there is a <thead> or top <th>-only rows
if header is None:
if len(head) == 1:
header = 0
else:
# ignore all-empty-text rows
header = [i for i, row in enumerate(head) if any(text for text in row)]
if foot:
body += foot
# fill out elements of body that are "ragged"
_expand_elements(body)
tp = TextParser(body, header=header, **kwargs)
df = tp.read()
return df
_valid_parsers = {
"lxml": _LxmlFrameParser,
None: _LxmlFrameParser,
"html5lib": _BeautifulSoupHtml5LibFrameParser,
"bs4": _BeautifulSoupHtml5LibFrameParser,
}
def _parser_dispatch(flavor):
"""Choose the parser based on the input flavor.
Parameters
----------
flavor : str
The type of parser to use. This must be a valid backend.
Returns
-------
cls : _HtmlFrameParser subclass
The parser class based on the requested input flavor.
Raises
------
ValueError
* If `flavor` is not a valid backend.
ImportError
* If you do not have the requested `flavor`
"""
valid_parsers = list(_valid_parsers.keys())
if flavor not in valid_parsers:
raise ValueError(
"{invalid!r} is not a valid flavor, valid flavors "
"are {valid}".format(invalid=flavor, valid=valid_parsers)
)
if flavor in ("bs4", "html5lib"):
if not _HAS_HTML5LIB:
raise ImportError("html5lib not found, please install it")
if not _HAS_BS4:
raise ImportError("BeautifulSoup4 (bs4) not found, please install it")
# Although we call this above, we want to raise here right before use.
bs4 = import_optional_dependency("bs4") # noqa:F841
else:
if not _HAS_LXML:
raise ImportError("lxml not found, please install it")
return _valid_parsers[flavor]
def _print_as_set(s):
return "{" + "{arg}".format(arg=", ".join(pprint_thing(el) for el in s)) + "}"
def _validate_flavor(flavor):
if flavor is None:
flavor = "lxml", "bs4"
elif isinstance(flavor, str):
flavor = (flavor,)
elif isinstance(flavor, abc.Iterable):
if not all(isinstance(flav, str) for flav in flavor):
raise TypeError(
"Object of type {typ!r} is not an iterable of "
"strings".format(typ=type(flavor).__name__)
)
else:
fmt = "{flavor!r}" if isinstance(flavor, str) else "{flavor}"
fmt += " is not a valid flavor"
raise ValueError(fmt.format(flavor=flavor))
flavor = tuple(flavor)
valid_flavors = set(_valid_parsers)
flavor_set = set(flavor)
if not flavor_set & valid_flavors:
raise ValueError(
"{invalid} is not a valid set of flavors, valid "
"flavors are {valid}".format(
invalid=_print_as_set(flavor_set), valid=_print_as_set(valid_flavors)
)
)
return flavor
def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
flavor = _validate_flavor(flavor)
compiled_match = re.compile(match) # you can pass a compiled regex here
# hack around python 3 deleting the exception variable
retained = None
for flav in flavor:
parser = _parser_dispatch(flav)
p = parser(io, compiled_match, attrs, encoding, displayed_only)
try:
tables = p.parse_tables()
except Exception as caught:
# if `io` is an io-like object, check if it's seekable
# and try to rewind it before trying the next parser
if hasattr(io, "seekable") and io.seekable():
io.seek(0)
elif hasattr(io, "seekable") and not io.seekable():
# if we couldn't rewind it, let the user know
raise ValueError(
"The flavor {} failed to parse your input. "
"Since you passed a non-rewindable file "
"object, we can't rewind it to try "
"another parser. Try read_html() with a "
"different flavor.".format(flav)
)
retained = caught
else:
break
else:
raise_with_traceback(retained)
ret = []
for table in tables:
try:
ret.append(_data_to_frame(data=table, **kwargs))
except EmptyDataError: # empty table
continue
return ret
def read_html(
io,
match=".+",
flavor=None,
header=None,
index_col=None,
skiprows=None,
attrs=None,
parse_dates=False,
thousands=",",
encoding=None,
decimal=".",
converters=None,
na_values=None,
keep_default_na=True,
displayed_only=True,
):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str, path object or file-like object
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
        0-based. Number of rows to skip after parsing the column header. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
        The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
    This function searches for ``<table>`` elements and, within each table,
    only ``<tr>`` rows and ``<td>``/``<th>`` cells. ``<td>`` stands for
    "table data". This function attempts to properly handle ``colspan``
    and ``rowspan`` attributes.
    If the table has a ``<thead>`` element, it is used to construct
the header, otherwise the function attempts to find the header within
the body (by putting rows with only ``<th>`` elements into the header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables.
"""
_importers()
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise ValueError(
"cannot skip rows starting from the end of the "
"data (you passed a negative value)"
)
_validate_header_arg(header)
return _parse(
flavor=flavor,
io=io,
match=match,
header=header,
index_col=index_col,
skiprows=skiprows,
parse_dates=parse_dates,
thousands=thousands,
attrs=attrs,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
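# Editor's usage sketch (hedged; the URL and match string are illustrative
# only, and "bs4" requires BeautifulSoup4 + html5lib to be installed):
#   dfs = read_html("https://example.com/stats.html", match="Population",
#                   header=0, flavor="bs4")
#   first = dfs[0]  # read_html always returns a list of DataFrames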
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/model_selection/tests/test_search.py | 20 | 30855 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
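# Editor's illustration (hedged): ParameterGrid({'a': [1, 2], 'b': [True]})
# expands to [{'a': 1, 'b': True}, {'a': 2, 'b': True}]; keys are sorted
# before the Cartesian product, so iteration order is deterministic.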
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
print(cv)
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
    # Pass 4-d X and 3-d y arrays to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
mr-perry/SHARAD_Coverage | code/SHARAD_Coverage_func.py | 1 | 64354 | #!/usr/bin/env python3.4
#
# Import necessary libraries, module, etc
#
import sys, os, socket, argparse, time
import shapefile
import MySQLdb as SQL# MySQL Interface Library
import _mysql_exceptions
import numpy as np
import scipy.interpolate
import scipy.ndimage
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.io.idl import readsav # Needed to read IDL MOLA save files
from mpl_toolkits.basemap import Basemap
from datetime import datetime as dt
from copy import copy
#
# MATT, THIS IS YOUR 'FROM SCRATCH' VERSION OF SHARAD_COVERAGE
# DESCREPENCIES EXIST BETWEEN THE IDL AND PYTHON VERSION AND I NEED
# TO FIX THEM. SHARAD_COVERAGE.PY WAS THE ORIGINAL WORKING VERSION,
# SHARAD_COVERAGE2.PY WAS USED TO DEBUG SIZE DESCREPENCIES
#
###########################################################################
# FUNCTIONS
#
# PARSE ARGS IS TAKEN DIRECTLY FROM SHARAD_COVERAGE2.PY
#
def parseargs(prog, vers, shresc, shres):
#
# Parse the Command Line arguments
#
#
# Set the default values
#
orng = np.array([829,100000])
srng = np.array([0., -88., 360., 88.])
orbitids = ''
orbittrig = False
roll = 99.
rcri = '<'
nomap = 0
foldmap = 0
elevmap = 0
replot = 0
update = 0
clat = 0
cset = 0
shresc = 3.0
    missions = ['PSP', 'ESP', 'EM1', 'EM2', 'EM3', 'EM4', 'EM5', 'EM6']
shresi = 0.750
#
# Initiate parser
#
parser = argparse.ArgumentParser(description=str(prog + ' ' + str(vers)))
#
# Ordered Arguments
#
parser.add_argument('mapname', type=str, nargs=1, default='SHARAD_coverage',
help = str("Desired base filename for map: {SHARAD_coverage}"))
#
# Optional Arguments
#
parser.add_argument('-o', '--orbitrange', nargs=2, default=orng, type=float,
help="Desired range of orbits to include")
parser.add_argument('-r','--rollabove', nargs=1, type=float,
help="Select data with roll at or above angle, degrees")
parser.add_argument('-n', '--rollbelow', nargs=1, type=float,
help="Select data with roll below angle, degrees")
parser.add_argument('-w', '--trackwidth', nargs=1, default=shresc, type=float,
help="Effective track width in km")
parser.add_argument('-p', '--pixelsperdegree', nargs=1, default=shres, type=float,
help="Mapping resolution in pix/deg")
parser.add_argument('-m', '--mapping', nargs=4, default=srng, type=float,
help="Geographic mapping limits in degrees")
parser.add_argument('-s', '--selection', nargs=4, default=[999, 999, 999, 999], type=float,
help="Geographic selection limits in degrees")
parser.add_argument('-c', '--centerlat', nargs=1, type=int,
help="Center latitude for projection")
parser.add_argument('-P', '--missionphase', nargs=1, type=str,
help="Map coverage for specific science phase")
parser.add_argument('-C', '--cumulative', action="store_true", default=False,
help="Map science phase maps cumulative")
parser.add_argument('-e', '--elevation', action="store_true", default=False,
help="Map coverage in elevation rather than over elevation")
parser.add_argument('-O', '--overlay', action="store_true", default=False,
help="Overlay colored MOLA elevation over b/w MOLA map")
parser.add_argument('-f', '--foldmap', action="store_true", default=False,
help="Map coverage in fold rather than over elevation")
parser.add_argument('-d', '--savedataonly', action="store_true", default=False,
help="Retain ground-track data and skip mapping")
parser.add_argument('-R', '--restore', nargs=1, type=str,
help="Restore previously saved map file and recreate plot")
parser.add_argument('-U', '--update', nargs=1, type=str,
help="Update previous version of map (requires appropriate .sav file)")
parser.add_argument('-T', '--targetbox', nargs=1, type=int, default=0,
help="Select target box")
parser.add_argument('-L', '--landingsite', nargs=1, type=str, default='',
help="Select Landing Sites: CLH, JEZ, NES, ARA, MAW, OXA")
parser.add_argument('-I', '--orbitfile', nargs=1, type=str, default='',
help="Import Orbit IDs to map")
parser.add_argument('-D', '--backgrounddata', nargs=1, type=str, default='MOLA',
help="Select the background data to plot: MOLA, TES (not available), MOLAHRSC (not available)")
if len(sys.argv[1:]) == 0:
parser.print_help()
# parser.print_usage() # for just the usage line
parser.exit()
args = parser.parse_args()
#
# Before parsing argument check if files mentioned in replot or update are valid
#
#
# Now parse the remaining arguments
#
mapbase = str(args.mapname[0])
#
# Open log file with mapname
#
outDir = '/home/mperry/Applications/SHARAD_Coverage/output/'
if not os.path.isdir(outDir):
os.mkdir(outDir)
log_fname = outDir + mapbase + ".log"
sys.stdout = open(log_fname, 'w')
sys.stderr = sys.stdout
#
# Get background data
#
bg_data = 'MOLA' if args.backgrounddata == 'MOLA' else args.backgrounddata[0]
#
# Check if update file exists
#
if args.update is not None:
update = True
f_update = args.update[0]
if not os.path.isfile(f_update):
print("ERROR: File with which to update does not exist in the current path.")
print("Please move to file to the local directory or specify the file's full path")
print("Exiting")
sys.stdout.flush()
parser.exit()
    orng = np.asarray(args.orbitrange, dtype=float)
#
# Only -r or -n can be specified. If both are specified print usage and exit
#
if args.rollbelow and args.rollabove:
print("ERROR: Only '-r' or '-n' can be specified. Exiting.")
sys.stdout.flush()
parser.print_help()
parser.exit()
elif args.rollbelow is not None:
roll = float(args.rollbelow[0])
rcri = str("<")
elif args.rollabove is not None:
roll = float(args.rollabove[0])
rcri = str(">=")
mres = shres if args.pixelsperdegree == shres else int(args.pixelsperdegree[0])
try:
wtrk = float(args.trackwidth[0])
except:
wtrk = float(args.trackwidth)
#############################
#
# Map bounds!
#
# Check if target box was selected
#
if args.targetbox != 0:
target = args.targetbox[0] #Retrieve target box
#
# Determine latitude and longitude bounds
#
mrng = targetboxbounds(target)
srng = targetboxbounds(target)
elif args.landingsite != '':
LS_Abbr = args.landingsite[0] # Get landing site abbreviation
#
# Determine latitude and longitude bounds
#
mrng = landingsitebounds(LS_Abbr)
else:
mrng = np.asarray(args.mapping, float)
srng = np.asarray(args.selection, float)
if srng[0] == 999:
srng[0] = mrng[0]
if srng[1] == 999:
srng[1] = mrng[1]
if srng[2] == 999:
srng[2] = mrng[2]
if srng[3] == 999:
srng[3] = mrng[3]
if args.centerlat is None:
#
# If the center latitude is not specified find the center latitude
#
avlat = float((mrng[1]+mrng[3])/2)
if avlat < -60:
clat = -90
elif avlat > 60:
clat = 90
else:
clat = 0
else:
clat = args.centerlat[0]
if args.missionphase is not None:
#
# Make sure mission phase is one of the actual mission phases
#
phase = str(args.missionphase[0]).upper()
if phase in missions:
ph_trig = True
else:
print("Error: Mission phase %s not in mission list. Please revise and restart" %
(phase))
            sys.stdout.flush()
            sys.exit()
else:
phase = 'None'
cml = args.cumulative
    nomap = args.savedataonly
    if (args.elevation or args.foldmap) and args.savedataonly:
        print("WARNING: Elevation or Fold maps selected along with no map option")
        print("WARNING: Continuing with no map option")
        sys.stdout.flush()
        elevmap = False
        foldmap = False
    else:
        elevmap = args.elevation
        foldmap = args.foldmap
overlay = args.overlay
#
# If overlay is selected, turn off elevation map
#
if overlay and elevmap:
        elevmap = False
####
#
# Generate Product ID range from orbit range
#
try:
orbitfile = args.orbitfile[0]
except:
orbitfile = args.orbitfile
if orbitfile != '':
orbittrig = True
#
# Check if file exists
#
if os.path.isfile(orbitfile):
#
# Success! Now read in file
#
orbitids = np.loadtxt(orbitfile, int)
else:
print('Orbit file {} not found'.format(str(orbitfile)))
sys.exit()
if orng[0] < 829:
orng[0] = 829
pmin = int(orng[0] * 100000)
pmax = int((orng[1] + 1) *100000)
#
# MATT 28 SEPT 2017
# CHECK SHARAD INLINE RESOLUTION. IF THE CURRENT TRACK WIDTH IS LESS
    # THAN 0.750 KM, ADJUST SHRESI TO WTRK TO AVOID INTERPOLATION ISSUES
    # AND CHECKERED TRACK APPEARANCES
if wtrk < 0.750:
shresi = wtrk
#
# Print out summary
#
print("----- Coverage Map Options Summary -----")
print("Map Base:\t{}".format(mapbase))
print("Background Data:\t{}".format(bg_data))
if orbittrig:
print("Orbit File:\t{}".format(str(orbitfile)))
else:
print("Orbit Range:\t{}".format(str(orng)))
print("Roll Angle:\t{}{}".format(str(rcri), str(roll)))
print("Track Width:\t{}".format(str(wtrk)))
print("Map Resolution:\t{}".format(str(mres)))
print("SHARAD Inline Resolution:\t{}".format(str(0.750)))
if args.targetbox != 0:
print("Target Box:\t{}".format(str(args.targetbox)))
elif args.landingsite != '':
print("Landing Site:\t{}".format(str(args.landingsite)))
print("Mapping Range:\t{}".format(str(mrng)))
print("Selection Range:\t{}".format(str(srng)))
print("Center Latitude:\t{}".format(str(clat)))
print("Mission Phase:\t{}".format(str(phase)))
print("Cumulative Mission Phase Map:\t{}".format(str(cml)))
print("Overlay Map:\t{}".format(str(overlay)))
print("Elevation Map:\t{}".format(str(elevmap)))
print("Fold Map:\t{}".format(str(foldmap)))
print("Save track data (no plot):\t{}".format(str(nomap)))
print("Restore a previous plot:\t{}".format(str(replot)))
print("Update an existing map:\t{}".format(str(update)))
print("----- End of Summary -----")
sys.stdout.flush()
return mapbase, orng, orbitids, pmin, pmax, roll, rcri, wtrk, mres, shresi, \
mrng, srng, clat, phase, cml, elevmap, foldmap, nomap, \
replot, update, overlay, bg_data
def targetboxbounds(targetbox_number):
prom = '[TARGET_BOX]: '
target_path = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/target_boxes/Targets'
#
# Given a target box number, return latitude and longitude bounds
#
sf = shapefile.Reader(target_path)
shapes = sf.shapes()
records = sf.records()
# Make array of IDS, longitudes and latitudes
if len(shapes) == len(records): #Idiot check
ids = np.zeros(len(records), int)
for ii in range(len(records)):
ids[ii] = records[ii][2]
w = np.where(ids == targetbox_number)
if len(w[0]) > 0:
mrng = np.asarray(shapes[w[0][0]].bbox)
else:
print(prom + 'Target box {} not found. Exiting.'.format(str(targetbox_number)))
sys.exit()
return mrng
def landingsitebounds(LS_Abbr):
prom = '[LANDING_SITE]: '
#
# LS_Abbr: Landing Site abbreviation
#
landing_sites = { # Mars2020 Landing Sites
'CLH': np.array([174.675, -15.386, 176.376, -13.685]),
'JEZ': np.array([76.669, 17.621, 78.370, 19.322]),
'NES': np.array([76.298, 17.0327, 77.998, 18.733]),
# ExoMars Landing Sites
'ARA': np.array([347.950, 7.132, 349.650, 8.833]),
'MAW': np.array([341.198, 21.398, 342.899, 23.098]),
'OXA': np.array([334.908, 17.288, 336.609, 18.989])
}
try:
mrng = landing_sites[LS_Abbr.upper()]
return mrng
    except KeyError:
print(prom + 'Landing Site %s was not found. Please check your abbreviation and try again.' % (LS_Abbr.upper()))
sys.exit()
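#
# A minimal usage sketch for the bounding-box helpers above. The returned
# array is [llon, llat, ulon, ulat] in degrees East / North. Only the
# dictionary-backed landing-site lookup is exercised here, since
# targetboxbounds() additionally needs the Targets shapefile on disk.
#
def _bounds_usage_sketch():
    mrng = landingsitebounds('JEZ')
    assert mrng[0] < mrng[2] and mrng[1] < mrng[3]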
#
# LOAD MOLA
#
def loadmola(mres, null=np.nan):
#
# This function loads and resamples per user input the MOLA data.
# The function mapextract is called in order to limit the extent of the MOLA
# data based on user input.
#
# Adapted for Python from SHARAD_coverage by Matthew Perry
# Last update to this function: 13 July 2017
#
# Input:
# mres - Resolution of map in pixels per degree
# null - null value; default is np.nan
#
# Output:
# elev = MOLA map
#
############################################################################
prom = "[LOAD_MOLA]: "
#
# Create map from binary INT array of surface elevation
# above areoid in meters and overplot ground tracks from
# selected observations
#
savemap = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/mola_data/mola_' + str(mres) + 'ppd.npy'
if not os.path.isfile(savemap):
#
# Rebin the elevation map
#
res = 128 #MOLA res in pix/deg
nlon = 360
nlat = 176
infile = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/mola_data/mola.128.dat'
#
# Read in and convert MOLA data
#
elev = np.fromfile(infile, dtype=np.int16, count=-1)
elev = np.reshape(elev, [nlat*res,nlon*res])
elev = np.asarray(elev)
elev = congrid(elev, [nlat*mres, nlon*mres])
elev = np.flipud(elev)
np.save(savemap,elev)
else:
print(prom + "Restoring existing resampled MOLA map {}".format(savemap))
sys.stdout.flush()
elev = np.load(savemap)
elev = elev/1000 #Place MOLA in KM
sys.stdout.flush()
return elev
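#
# A minimal sketch of what loadmola() returns, assuming either the cached
# .npy resample or the raw mola.128.dat is present on this host: a
# (176*mres, 360*mres) array of elevations in km above the areoid.
#
def _loadmola_usage_sketch(mres=4):
    elev = loadmola(mres)
    assert elev.shape == (176 * mres, 360 * mres)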
def loadmdim(mres):
#
# This function loads and resamples per user input the MDIM data.
# The function mapextract is called in order to limit the extent of the MDIM
# data based on user input.
#
# Adapted for Python from SHARAD_coverage by Matthew Perry
# Last update to this function: 13 July 2017
#
# Input:
# mres - Resolution of map in pixels per degree
#
# Output:
# elev = MDIM map
#
############################################################################
prom = "[LOAD_MDIM]: "
#
# Create map from binary INT array of surface elevation
# above areoid in meters and overplot ground tracks from
# selected observations
#
savemap = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/mdim_data/mdim_' + str(mres) + 'ppd.npy'
if not os.path.isfile(savemap):
#
# Rebin the elevation map
#
print(prom + "This is not available yet. Exiting")
sys.stdout.flush()
sys.exit()
'''
res = 128 #MOLA res in pix/deg
nlon = 360
nlat = 176
infile = '/data/mola_data/mola.128.dat'
#
# Read in and convert MOLA data
#
elev = np.fromfile(infile, dtype=np.int16, count=-1)
elev = np.reshape(elev, [nlat*res,nlon*res])
elev = np.asarray(elev)
elev = imresize(elev, [nlat*mres, nlon*mres])
elev = np.rot90(elev,3) #Reorient with N up and W left
np.save(savemap,elev)
'''
else:
print(prom + "Restoring existing resampled MDIM map {}".format(savemap))
sys.stdout.flush()
elev = np.load(savemap)
sys.stdout.flush()
return elev
def interpTracks(wo_data, shresi=0.75, limit=np.array([0,-88.,360, 88.])):
erad = 3396 # Mars equatorial radius in km
d2r = np.pi / 180. # Degress to radians
nx = np.shape(wo_data)[0]
npnt = 0
nob = 0
olat = []
olon = []
if nx > 1:
for jj in range(0, nx-1):
#
# Find great circle distance from spherical law of cosines
# #Errors occur if x[jj+1] - x[jj] == 0
d = erad * np.arccos(np.sin(wo_data[jj, 0]*d2r) * np.sin(wo_data[jj+1, 0]*d2r) +
np.cos(wo_data[jj,0]*d2r) * np.cos(wo_data[jj+1, 0]*d2r) *
np.cos((wo_data[jj+1,1] - wo_data[jj,1])*d2r))
if d > 200:
#
# These points occur where a ground track is interrupted
# due to a variable roll angle exceeding the selection limit
# or to the track coming in and out of the mapping limits.
#
print(warn3 + str(d) + ' km')
else:
                try:
                    npt = int((d/shresi) + 1) # Set number of interp points by in-track res
                except ValueError:
                    print(warn4)
                    npt = 0 # Duplicate rows make d NaN, and int(NaN) raises
if npt > 1:
#
# Adjust tracks that cross the meridian for interpolation
#
if wo_data[jj+1,1] - wo_data[jj,1] > 300:
wo_data[jj,1] = wo_data[jj,1] + 360
if wo_data[jj+1,1] - wo_data[jj,1] < -300:
wo_data[jj,1] = wo_data[jj,1] - 360
#
# MATT: This interpolation could be incorrect. MAKE SURE TO CHECK THIS
# IF THE TRACKS LOOK LIKE SILLY GOOSES
#
u = np.interp(np.arange(0, npt),[0, npt-1], [wo_data[jj,1], wo_data[jj+1,1]]) # Interpolation longitudes
v = np.interp(np.arange(0, npt),[0, npt-1], [wo_data[jj,0], wo_data[jj+1,0]]) # Interpolation latitudes
#
                    # Correct meridian-crossing longitudes
                    #
                    wneg = np.where(u < 0.)
                    if len(wneg[0]) > 0:
                        u[wneg] = u[wneg] + 360.
                    wbig = np.where(u >= 360.)
                    if len(wbig[0]) > 0:
                        u[wbig] = u[wbig] - 360.
else:
u = wo_data[jj,1] if jj < nx - 2 else [wo_data[jj,1], wo_data[jj+1,1]] # Transfer longitudes
v = wo_data[jj,0] if jj < nx - 2 else [wo_data[jj,0], wo_data[jj+1,0]] # Transfer latitudes
print(warn2)
#
# Apply mapping limits
#
wlim = np.where((u >= limit[0]) & (u <= limit[2]) & (v >= limit[1]) & (v <= limit[3]))
if len(wlim[0]) > 0:
#
# Create or append to orbit lon, lat array
#
olon = np.append(olon, u)
olat = np.append(olat, v)
npnt += 1 # Increment interpolation counter
nob += 1
else:
print(warn1)
sys.stdout.flush()
return npnt, nob, olon, olat
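#
# A standalone sketch of the spherical-law-of-cosines distance used in
# interpTracks(), with hypothetical coordinates (degrees N, degrees E).
# Two equatorial points one degree of longitude apart should sit one
# equatorial scale apart: 3396 * pi/180 ~ 59.27 km.
#
def _great_circle_sketch():
    erad = 3396.
    d2r = np.pi / 180.
    lat1, lon1 = 0., 10.
    lat2, lon2 = 0., 11.
    d = erad * np.arccos(np.sin(lat1*d2r) * np.sin(lat2*d2r) +
                         np.cos(lat1*d2r) * np.cos(lat2*d2r) *
                         np.cos((lon2 - lon1)*d2r))
    assert abs(d - erad * d2r) < 1e-6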
def gettracks(odat, wtrk, troll='', shresi=0.75, limit=np.array([0, -88, 360, 88])):
st = dt.now()
global warn1, warn2, warn3, warn4
prom = "[GET_TRACK]: "
#
# Find number of unique orbit IDs
#
oval = np.array(odat[:,0], int)
ouni = oval[np.sort(np.unique(oval, return_index=True)[1])] # Orb_id (unique)
nuni = len(ouni) # Number of unique Orb_id values
nobs = str(nuni)
print(prom + "Database has {} SHARAD observations {}".format(nobs, troll))
sys.stdout.flush()
#
# Now cycle through ground-tracks, map orbit lon,lat (x,y) to map lon,lat (u,v)
#
nobs = 0 # Reset number of orbits to zero
    npnts = 0 # Set interpolation counter to zero
olons = []
olats = []
for ii in range(nuni):
wo = np.where(oval == ouni[ii]) # Find unique Orb_id
wo_data = odat[wo, 1:3][0]
warn1 = prom + "WARNING: less than two points, Orb_id {} not mapped".format(str(ouni[ii]))
warn2 = prom + "WARNING: Orb_id {} points closer than {} km".format(str(ouni[ii]), str(wtrk))
warn3 = prom + "WARNING: Omitted point from Orb_id {} spaced > 200 km -- ".format(str(ouni[ii]))
warn4 = prom + "WARNING: Invalid value encountered when calculating the distance for Orb_id {} ".format(str(ouni[ii]))
        npnt, nob, olon, olat = interpTracks(wo_data, shresi=shresi, limit=limit)
npnts += npnt # Update
nobs += nob # Update
olons.append(olon)
olats.append(olat)
olon = np.concatenate(olons).ravel()
olat = np.concatenate(olats).ravel()
print(prom + "Mapping {} points on {} SHARAD observations {}".format(str(npnts), str(nobs), troll))
print(prom + "Completed in {}".format(dt.now() - st))
sys.stdout.flush()
return nobs, npnts, olat, olon
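#
# A sketch of the order-preserving unique idiom used in gettracks():
# np.unique() returns values sorted by value, so the code sorts the
# first-occurrence indices instead, keeping orbit IDs in acquisition order.
#
def _unique_preserve_order_sketch():
    oval = np.array([7, 3, 7, 5, 3])
    ouni = oval[np.sort(np.unique(oval, return_index=True)[1])]
    assert list(ouni) == [7, 3, 5]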
def fixarrays(array1, array2):
if np.shape(array1)[0] != np.shape(array2)[0]:
#
# The rows are inconsistent
#
if np.shape(array1)[0] > np.shape(array2)[0]:
array1 = array1[:-1,:]
elif np.shape(array1)[0] < np.shape(array2)[0]:
            array2 = array2[:-1,:]
if np.shape(array1)[1] != np.shape(array2)[1]:
#
# The columns are inconsistent
#
if np.shape(array1)[1] > np.shape(array2)[1]:
array1 = array1[:,:-1]
elif np.shape(array1)[1] < np.shape(array2)[1]:
array2 = array2[:,:-1]
return array1, array2
def setcolormap(table, overlay, limit):
latDiff = limit[3] - limit[1]
lonDiff = limit[2] - limit[0]
if lonDiff >= 50. and latDiff >= 45.:
cmap = custom_color_tables('MOLA')
elif lonDiff >= 50. and (limit[3] == 87.5 and limit[1] == 69):
cmap = custom_color_tables('MOLA')
elif lonDiff >= 50. and (limit[1] == -87.5 and limit[3] == -69):
cmap = custom_color_tables('MOLA')
else:
cmap = mpl.cm.jet
if overlay:
cmap.set_bad(alpha=0.0)
else:
cmap.set_bad('black', 1.)
return cmap
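#
# A sketch of how the 'bad' color configured in setcolormap() interacts with
# masked arrays: masked cells render in the set_bad() color (transparent
# alpha for overlays, black otherwise). A copy of jet stands in for the
# custom MOLA table here, so the shared global colormap is left untouched.
#
def _masked_cmap_sketch():
    cmap = copy(mpl.cm.jet)
    cmap.set_bad('black', 1.)
    data = np.ma.masked_invalid(np.array([[0., np.nan], [1., 2.]]))
    rgba = cmap(data / 2.)  # the NaN cell maps to the 'bad' (black) color
    return rgba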
def coveragesupptxt(troll, nobs, omin, omax, rarea, limit, clat):
prom = '[PLOT LABELS]: '
print(prom + 'Setting up the plot labels')
sys.stdout.flush()
ttle = "{} SHARAD Observations between orbits {} and {}".format(nobs, omin, omax)
if troll:
ttle += '\n'+troll
sup_txt1 = "Created at: {}".format(dt.now())
sup_txt2 = "{:5.2f}% Coverage\nW, N Limits: [{}, {}, {}, {}]\ncenter lat (N): {} \ncenter lon (E): {}".format(rarea, str(limit[0]), str(limit[1]), str(limit[2]), str(limit[3]), str(clat), str(0))
return ttle, sup_txt1, sup_txt2
###########################################################################
#
# Database functions
#
def formquery(limit, rcri, roll, pmin, pmax, phase, cml):
if phase == 'None':
#
# Get observation data
#
query = "SELECT O.Orb_id, O.Lat, O.Lon FROM Orbit_data O WHERE ABS(O.Roll) {} {} ".format(rcri, str(roll))
query += "AND O.Lon BETWEEN {} and {} ".format(str(limit[0]), str(limit[2]))
query += "AND O.Lat BETWEEN {} AND {} ".format(str(limit[1]), str(limit[3]))
query += "AND O.Orb_id IN (SELECT X.Orb_id FROM Orbit X WHERE X.Orb_id = (SELECT MAX(Y.Orb_id) FROM Orbit Y "
query += "WHERE X.Prod_id = Y.Prod_id) AND X.Prod_id BETWEEN {} AND {}) ".format(str(pmin), str(pmax))
query += "ORDER BY O.Sample_Time"
#
# Get minimum and maximum orbits and dates
#
query2 = "SELECT DISTINCT Orbit.Prod_id DIV 100000, Orbit.Start_time "
query2 += "FROM Orbit "
query2 += "WHERE Orbit.Prod_id = ( "
query2 += "SELECT MIN(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Prod_id BETWEEN {} and {}".format(str(pmin), str(pmax))
query2 += ") "
query2 += "OR Orbit.Prod_id = ( "
query2 += "SELECT MAX(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Prod_id BETWEEN {} and {}".format(str(pmin), str(pmax))
query2 += ")"
else:
if phase not in ['PSP', 'ESP', 'EM1',
'EM2', 'EM3', 'EM4',
'EM5', 'EM6']:
print('Select phase {} is not understood. Please try again'.format(phase))
sys.stdout.flush()
else:
if phase == "PSP":
st = '2006-10-01 00:00:00.000'
et = '2008-10-31 23:59:59.999'
elif phase == "ESP":
st = '2008-11-01 00:00:00.000'
et = '2010-11-30 23:59:59.999'
elif phase == "EM1":
st = '2010-12-01 00:00:00.000'
et = '2012-09-30 23:59:59.999'
elif phase == "EM2":
st = '2012-10-01 00:00:00.000'
et = '2014-09-30 23:59:59.999'
elif phase == "EM3":
st = '2014-10-01 00:00:00.000'
et = '2016-09-30 23:59:59.999'
elif phase == "EM4":
st = '2016-10-01 00:00:00.000'
                et = '2019-09-30 23:59:59.999'
elif phase == "EM5":
st = '2019-10-01 00:00:00.000'
et = '2022-09-30 23:59:59.999'
elif phase == "EM6":
st = '2022-10-01 00:00:00.000'
et = '2024-09-30 23:59:59.999'
if cml:
st = '2006-10-01 00:00:00.000'
query = "SELECT OD.Orb_id, OD.Lat, OD.Lon FROM Orbit_data as OD INNER JOIN Orbit as O ON O.Orb_id = OD.Orb_id "
query += "INNER JOIN DecData as D ON D.Prod_id = O.Prod_id WHERE D.OST_Start_Date BETWEEN '{}' AND '{}' ".format(st, et)
query += "AND ABS(OD.Roll) {} {} ".format(rcri, str(roll))
query += "AND OD.Lon BETWEEN {} AND {} AND OD.Lat BETWEEN {} AND {} ".format(str(limit[0]), str(limit[2]), str(limit[1]), str(limit[3]))
query += "AND OD.Orb_id IN (SELECT X.Orb_id FROM Orbit X WHERE X.Orb_id = (SELECT MAX(Y.Orb_id) FROM Orbit Y "
query += "WHERE X.Prod_id = Y.Prod_id)) "
query += "ORDER BY OD.Orb_id"
#
# Get minimum and maximum orbits and dates
#
query2 = "SELECT DISTINCT Orbit.Prod_id DIV 100000, Orbit.Start_time "
query2 += "FROM Orbit "
query2 += "WHERE Orbit.Prod_id = ( "
query2 += "SELECT MIN(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Start_time BETWEEN '{}' and '{}'".format(str(st), str(et))
query2 += ") "
query2 += "OR Orbit.Prod_id = ( "
query2 += "SELECT MAX(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Start_time BETWEEN '{}' and '{}'".format(str(st), str(et))
query2 += ")"
return query, query2
def formorbitquery(limit, orbitids):
query = "SELECT O.Orb_id, O.Lat, O.Lon FROM Orbit_data O WHERE "
query += "O.Lon BETWEEN {} and {} ".format(str(limit[0]), str(limit[2]))
query += "AND O.Lat BETWEEN {} AND {} ".format(str(limit[1]), str(limit[3]))
query += "AND O.Orb_id IN (SELECT X.Orb_id FROM Orbit X WHERE X.Orb_id = (SELECT MAX(Y.Orb_id) FROM Orbit Y "
query += "WHERE X.Prod_id = Y.Prod_id) AND X.Prod_id IN ( "
for ii in range(len(orbitids)):
if ii != len(orbitids) - 1:
query += '{}, '.format(orbitids[ii])
else:
query += '{})) '.format(orbitids[ii])
query += 'ORDER BY O.Sample_Time'
query2 = "SELECT Orbit.Prod_id DIV 100000, Orbit.Start_time "
query2 += "FROM Orbit "
query2 += "WHERE Orbit.Prod_id = {} ".format(str(orbitids[0]))
query2 += "OR Orbit.Prod_id = {}" .format(str(orbitids[-1]))
return query, query2
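#
# A sketch of a more compact way to build the IN (...) list assembled in
# formorbitquery() above; functionally equivalent for integer product IDs.
# Neither version escapes its inputs, so IDs must come from a trusted source
# such as the -I orbit file.
#
def _in_clause_sketch(orbitids):
    return "X.Prod_id IN ({})".format(", ".join(str(i) for i in orbitids))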
def executequeries(limit,srng, rcri, roll, pmin, pmax, phase, cml, config_file, orbitids, latbuf=50.):
global_lim = np.array([0,-88,360,88])
erad = 3396 # Mars equatorial radius in km
d2r = np.pi / 180. # Degress to radians
escl = erad * d2r #Equatorial scale in km/deg
prom = '[EXECUTE_QUERIES]: '
#
# Determine selection limits
#
# THE FOLLOWING COMES FROM A CONVERSATION WITH THAN ON HOW TO AUTOMATE THE SEARCH
# TO INCLUDE ALL TRACKS THAT GO THROUGH A GIVEN WINDOW
#
if orbitids == '':
if np.all(limit == srng):
if limit[0] > global_lim[0]:
sllon = limit[0] - np.abs(latbuf*np.tan((1.6+87.4*np.cos((90-limit[1])*d2r))*d2r))/escl
else:
sllon = limit[0]
if limit[2] < global_lim[2]:
sulon = limit[2] + np.abs(latbuf*np.tan( (1.6+87.4*np.cos((90-limit[3])*d2r))*d2r))/escl
else:
sulon = limit[2]
if limit[1] > global_lim[1]:
sllat = limit[1]-np.abs(latbuf/escl)
else:
sllat = limit[1]
if limit[3] < global_lim[3]:
sulat = limit[3]+np.abs(latbuf/escl)
else:
sulat = limit[3]
else:
sllon = srng[0]
sllat = srng[1]
sulon = srng[2]
sulat = srng[3]
slimit = np.array([sllon, sllat, sulon, sulat])
print('Querying the database with selection limits: {}'.format(slimit))
sys.stdout.flush()
#
query, query2 = formquery(slimit, rcri, roll, pmin, pmax, phase, cml)
#
# Connect to DB and run query
#
else: #Orbit file loaded
print('Querying the database with selected orbits.')
query, query2 = formorbitquery(limit, orbitids)
[u, p, d] = dbreadconf(config_file)
db = dbconnect(u, p, d)
c = db.cursor()
numrows = c.execute(query)
    if numrows == 0:
        print(prom + 'The query returned no results. Exiting')
        sys.stdout.flush()
        sys.exit()
#
# Infill results into array
#
orbit_data = np.zeros([numrows,3], float)
for ii in range(numrows):
row = c.fetchone()
orbit_data[ii,0] = row[0]
orbit_data[ii,1] = row[1]
orbit_data[ii,2] = row[2]
#
# Get min and max values from SQL
#
#if orbitids == '':
c = db.cursor()
c.execute(query2)
temp = c.fetchall()
orbit_min, min_date = temp[0][0], temp[0][1]
orbit_max, max_date = temp[-1][0], temp[-1][1]
#else:
# orbit_min = min(orbitids)
# orbit_max = max(orbitids)
#
# close DB connection
#
c.close()
db.close()
return orbit_data, orbit_min, orbit_max, min_date, max_date
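#
# A worked example of the longitude buffer computed in executequeries().
# At the equator the heading term 1.6 + 87.4*cos(90 deg) reduces to 1.6
# degrees, so a 50 km buffer widens the window by roughly
# 50*tan(1.6 deg)/59.27 ~ 0.024 deg of longitude; toward the poles
# cos(90 - lat) grows and the buffer widens accordingly.
#
def _selection_buffer_sketch(lat=0., latbuf=50.):
    d2r = np.pi / 180.
    escl = 3396 * d2r  # equatorial scale in km/deg
    dlon = np.abs(latbuf * np.tan((1.6 + 87.4 * np.cos((90 - lat) * d2r)) * d2r)) / escl
    return dlon  # ~0.0236 degrees at the equator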
def savedata(mapbase, coverage, fold, limit, rarea, nobs, troll, clat, omin, omax):
fname = mapbase
    data = {'coverage': coverage, 'fold': fold, 'limit': limit, 'rarea': rarea, 'nobs': nobs, 'troll': troll, 'clat': clat, 'omin': omin, 'omax': omax}
np.save(fname, data)
return
def loaddata(filename):
#
# First check to ensure data exists in path given
#
if os.path.isfile(filename):
#
# File exists
#
data = np.load(filename)
data = data[()] # Data saved through numpy is an array containing a dictionary
coverage = data['coverage']
fold = data['fold']
limit = data['limit']
rarea = data['rarea']
nobs = data['nobs']
troll = data['troll']
clat = data['clat']
omin = data['omin']
omax = data['omax']
        return coverage, fold, limit, rarea, nobs, troll, clat, omin, omax
    else:
        print('[LOAD_DATA]: File {} not found'.format(filename))
        return None
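#
# A minimal roundtrip sketch for savedata()/loaddata() above, using throwaway
# values and a hypothetical temp path. It assumes a NumPy version that loads
# pickled .npy dictionaries by default, as loaddata() itself does. Note that
# loaddata() expects every key savedata() writes, including 'omin'/'omax'.
#
def _save_load_roundtrip_sketch(tmpfile='/tmp/coverage_sketch.npy'):
    cov = np.zeros((2, 2))
    savedata(tmpfile, cov, cov, np.array([0, -88, 360, 88]), 1.0, 5, '', 0, 829, 900)
    out = loaddata(tmpfile)
    assert out is not None and out[4] == 5  # nobs survives the roundtrip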
def congrid(a, newdims, method='linear', centre=False, minusone=False):
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
            base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
        newa = a[tuple(cd)]  # tuple indexing required for modern NumPy
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype = np.float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
        #
        # Python 3 equivalent of the cookbook's Python 2 expression
        # [ndims - 1] + range(ndims - 1): move the last axis to the front
        # of the transpose order.
        #
        trorder = [ndims - 1] + list(range(ndims - 1))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
        oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))
        #make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
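#
# A minimal congrid() usage sketch: linearly resample a small ramp from
# 4x4 up to 8x8, the same operation loadmola() performs when rebinning
# MOLA to mres pixels per degree.
#
def _congrid_usage_sketch():
    a = np.arange(16, dtype=float).reshape(4, 4)
    b = congrid(a, [8, 8])
    assert b.shape == (8, 8)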
##################################################################################
#
# Plotting Functions
#
##################################################################################
def getmapparams(clat, limit):
if limit[0] == 0 and limit[2] == 360:
#
# Global maps
#
if clat == 90:
table = 'MOLA'
vmin = -6.0
vmax = -3.0
proj = 'north'
meridians = np.arange(0, 360, 60)
parallels = np.arange(70,90+1, 10)
elif clat == -90:
table = 'MOLA'
vmin = -0.5
vmax = 6.0
proj = 'south'
meridians = np.arange(0, 360, 60)
parallels = np.arange(-90,-70-1, 10)
else:
table = 'MOLA'
vmin = -9
vmax = 21
proj = 'glob'
meridians = np.arange(0, 360, 60)
parallels = np.arange(-70,70+1, 20)
else:
table = 'MOLA'
meridians = np.arange(limit[0], limit[2], (limit[2]-limit[0])/3)
parallels = np.arange(limit[1], limit[3], (limit[3]-limit[1])/3)
if clat == 90:
vmin = -6.5
vmax = -1.5
proj = 'north_custom'
elif clat == -90:
vmin = -0.5
vmax = 6.0
proj = 'south_custom'
else:
vmin = -9
vmax = 21
proj = 'custom'
return table, vmin, vmax, proj, meridians, parallels
def plotmap(coverage, nelev, foldmap, elevmap, limit, blon, blat, troll, nobs, omin, omax, rarea, clat, mres, overlay=False, mapbase="SHARAD_Coverage", null=np.nan):
#
# Wrapper for map plotting
#
#
# Get plot labels and supplementary text
#
ttle, sup_txt1, sup_txt2 = coveragesupptxt(troll, nobs, omin, omax, rarea, limit, clat)
tclat = "" if clat == 0 else str("clat = " + str(clat))
#
# Set the plot size to standard paper size
# This will help create maps with uniform font sizes
#
xsize, ysize = 11, 8.5
if foldmap:
print('This function is not available at this time')
sys.stdout.flush()
return
else:
if overlay:
bg_map = nelev.copy()
bg_map[~np.isnan(coverage)] = null
nelev[np.isnan(coverage)] = null
else:
if elevmap: #Plot coverage paths in elevation rather than black
nelev[np.isnan(coverage)] = null
bg_map = False
else:
nelev[~np.isnan(coverage)] = null
bg_map = False
[table, vmin, vmax, proj, mer, par] = getmapparams(clat, limit)
#
# Get colormap and set nulls to black
#
cmap = setcolormap(table, overlay, limit)
#
# Let the plotting begin
#
drawmap(nelev, bg_map, blon, blat, xsize, ysize, limit, vmin, vmax, proj, mres, overlay=overlay, mer=mer, par=par, cmap=cmap, ttle=ttle, sup_txt1=sup_txt1, sup_txt2=sup_txt2, mapbase=mapbase)
return
def drawmap(nelev, bg_map, blon, blat, xsize, ysize, limit, vmin, vmax, proj, mres, overlay=False, mer=None, par=None, cmap=mpl.cm.jet, ttle=None, sup_txt1=None, sup_txt2=None, mapbase='SHARAD_Coverage', outDir='../output/'):
prom = '[DRAW MAP]: '
bannername = outDir + mapbase + '_banner.png'
jpgname = outDir + mapbase + '.jpg'
pngname = outDir + mapbase + '.png'
# Create meshgrid for plotting
#
if proj == 'glob' or proj == 'north' or proj == 'south':
#
# Roll nelev and bg_map to get into proper coordinates
#
sft = int(180*mres)
nelev = np.roll(nelev, sft, axis=1)
if overlay:
bg_map = np.roll(bg_map, sft, axis=1)
        blon = blon - 180.
limit[0] = limit[0] - 180.
limit[2] = limit[2] - 180.
if proj == 'glob':
mer = mer - 180.
#
# Now create a masked array for plotting
#
if overlay:
bg_map[np.where(~np.isnan(nelev))] = np.nan
map_bg = np.ma.masked_where(np.isnan(bg_map), bg_map)
plot_data = np.ma.masked_where(np.isnan(nelev), nelev)
[x, y] = np.meshgrid(blon, blat)
#
# Let the plotting begin
#
print(prom + 'Initializing Map')
sys.stdout.flush()
# fig = plt.figure(figsize=(xsize, ysize), dpi=500, facecolor='white')
fig = plt.figure(figsize=(11, 8.5), dpi=500, facecolor='white')
ax1 = fig.add_subplot(111)
#
# Set the basemap projection and limits
#
if proj == 'glob':
bmap = Basemap(projection='cyl', llcrnrlat=limit[1], urcrnrlat=limit[3], llcrnrlon=limit[0],
urcrnrlon=limit[2], resolution=None, lon_0=0)
elif proj == 'custom':
bmap = Basemap(projection='cyl', llcrnrlat=limit[1], urcrnrlat=limit[3], llcrnrlon=limit[0],
urcrnrlon=limit[2], resolution=None)
elif proj == 'north_custom':
bmap = polar_stere(limit[0], limit[2], limit[1], limit[3], resolution=None)
#bmap = Basemap(projection='npaeqd', boundinglat=limit[1], lon_0=0, resolution=None, round=False)
elif proj == 'south_custom':
bmap = Basemap(projection='spstere', boundinglat=limit[3], lon_0=180, resolution=None, round=False)
elif proj == 'north':
bmap = Basemap(projection='npstere', boundinglat=limit[1], lon_0=0, resolution=None, round=True)
elif proj == 'south':
bmap = Basemap(projection='spstere', boundinglat=limit[3], lon_0=180, resolution=None, round=True)
# Plot the data
#
if proj == "custom":
if overlay:
bmap.pcolormesh(x, y, map_bg, cmap=plt.cm.gray, latlon=True)
cs = bmap.pcolormesh(x, y, plot_data, cmap=cmap, latlon=True, alpha=1.0)
fig.savefig(bannername, dpi=100, bbox_inches='tight', transparent=True)
else:
if overlay:
bmap.pcolormesh(x, y, map_bg, vmin=vmin, vmax=vmax, cmap='gray', latlon=True)
cs = bmap.pcolormesh(x, y, plot_data, vmin=vmin, vmax=vmax, cmap=cmap, latlon=True, alpha=1.0)
fig.savefig(bannername, dpi=200, bbox_inches='tight', transparent=True)
print(prom + 'Banner image saved as {}'.format(bannername))
#
# If meridians and parallels, plot
#
if mer is not None:
bmap.drawmeridians(mer, labels=[False, False, True, True], fontsize=8)
if par is not None:
bmap.drawparallels(par, labels=[True, True, False, False], fontsize=8)
#
# Supplemental Text
#
if not sup_txt2 is None:
plt.gcf().text(0.00, 0.04, sup_txt2, fontsize=10, horizontalalignment='left', verticalalignment='center')
if not sup_txt1 is None:
plt.gcf().text(1.00, 0.01, sup_txt1, fontsize=10, horizontalalignment='right', verticalalignment='center')
#
# Colorbar
#
if proj == "north" or proj == "south":
cbar = bmap.colorbar(cs, location='bottom', pad="5%")
elif proj == "custom":
cbar = bmap.colorbar(cs, location='bottom', pad="10%")
else:
cbar = bmap.colorbar(cs, location='bottom', pad="20%")
#
# Plot title
#
plt.title(ttle, y=1.08, fontsize=12)
fig.savefig(jpgname, dpi=250)
fig.savefig(pngname, dpi=1000, transparent=True)
return
##################################################################################
#
# Log functions
#
##################################################################################
def writeLog(logfile, entry, verbose=False):
    prom = "[WRITE LOG]: "
    if verbose:
        print(prom + entry)
    if not logfile.closed:
        logfile.write(entry + "\n")
else:
print("Log file is closed or has not been opened for writing.")
print("Exiting...")
return
import sys, os
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import ListedColormap
from datetime import datetime as dt
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
def mapextract(inmap, limit, lhead=1024, nlon=None, nlat=None, fmt=False, null=np.nan,
diag=False, extent=np.array([0., -90., 360., 90.]), swap_endian=False):
#
# Created: 2003 Sep 01, Than Putzig - converted from mappoint.pro
# mod: 2004 Sep 08, Than Putzig - switch to LIMIT spec, added assoc params
# mod: 2006 Feb 24, Than Putzig - no longer loading INMAP to array,
# added min, max, mean, and median diagnostics
# mod: 2012 Mar 09, Than Putzig - added SWAP_ENDIAN keyword
# mod: 2012 Mar 12, Than Putzig - fixed latitude range selection
# mod: 2017 Sep 29, Matt Perry - ported to Python 3+ (missing file option at this point)
#
# For extracting a subset of an IDL or ASSOC map.
# Longitudes should be 0 to <360 (West or East)
# Latitudes should be -90 to 90 North.
# Input map is presumed to be of bin-centered data (e.g., first longitudes
# are 0+BLON/2 where BLON is longitudinal bin size).
#
# Procedure:
############################################################################
prom = '[MAP_EXTRACT]: '
##########################################################################
#
# Validity check -- test range of LLAT, HLAT, LLON and HLON, whether NLON
# and NLAT or neither is specified. Print error message
# and exit with zero for output if parameters incorrect.
llon=limit[0]
llat=limit[1]
hlon=limit[2]
hlat=limit[3]
if llon > 360. or llon < 0. or hlon > 360. or hlon < 0. or llon > hlon:
print(prom + "longitude value out of range (0-360, llon < hlon)")
outmap = 0.0
print(' ')
sys.stdout.flush()
return outmap
if llat > 90. or llat < -90. or hlat > 90. or hlat < -90 or llat > hlat:
print(prom + "latitude value out of range (-90-90, llat < hlat)")
outmap = 0.0
print(' ')
sys.stdout.flush()
return outmap
    if (nlon is None) != (nlat is None):
print(prom + "You must specify either both NLON and NLAT or neither.")
outmap = 0.0
print("")
sys.stdout.flush()
return outmap
#######################################################################
#
# determine whether INMAP is array or file and extract dimensions
#
# If INMAP is a file, it is assumed to be an assoc file with a 1024 byte
# header, where the first two words are the number of longitudes and number
# of latitudes, respectively (e.g., output from mapbin.pro), and the data
# is an array of type INT (16 bit signed integers).
#
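    # Hedged sketch of the (not yet implemented) file branch described above,
    # assuming 16-bit header words and the LHEAD-byte header:
    #
    #     with open(inmap, 'rb') as f:
    #         hdr = np.fromfile(f, dtype=np.int16, count=2)   # nlon, nlat
    #         f.seek(lhead)                                   # skip header
    #         mp = np.fromfile(f, dtype=np.int16).reshape(hdr[1], hdr[0])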
atype = type(inmap)
if atype is str:
#Here I am assuming any string entry is a file name
print(prom + "This functionality is not yet available. Please use array until further notice.")
outmap = 0.0
print("")
sys.stdout.flush()
return outmap
elif atype is np.ndarray:
mp = inmap
msize = np.shape(mp)
nlon = msize[1]
nlat = msize[0]
else:
print(prom + "Variable type not recognized. Please use either an array or file name.")
outmap = 0.0
print("")
sys.stdout.flush()
return outmap
#######################################################################
#
# precompute a few things
#
if np.shape(extent)[0] != 4:
extent = np.array([0., -90., 360., 90.])
polar = '' # preset
lat = 0 # preset
blat = (extent[3] - extent[1]) / nlat # latitude bin size
blon = (extent[2] - extent[0]) / nlon # longitude bin size
maxlon = nlon*blon - blon/2 #Largest longitude of bin center
#
# Find the high and low latitude and longitude indices
#
lat_lo = 0
lat_hi = nlat - 1
for ii in range(0, nlat): #ii: loop latitudes ascending
jj = nlat - 1 - ii #jj: reverse loop
latii = ii*blat + extent[1] + blat/2 # find map bin center for ii
latjj = jj*blat + extent[1] + blat/2 # find map bin center for jj
if latii < llat:
lat_lo = ii + 1 #Save lower limit for ii
if latjj > hlat:
lat_hi = jj - 1 #Save upper limit for jj
lon_lo = 0
lon_hi = nlon - 1
for ii in range(0, nlon): #ii: loop longitudes ascending
jj = nlon - 1 - ii #jj: reverse loop
lonii = ii*blon + extent[0] + blon/2 # find map bin centers for ii
lonjj = jj*blon + extent[0] + blon/2 # find map bin centers for jj
if lonii < llon:
lon_lo = ii + 1 # save lower limit for ii
if lonjj > hlon:
lon_hi = jj - 1 # save upper limit for jj
#
# Python's slicing goes from n:m where the mth element IS NOT included
# Add one to each hi index.
#
lon_hi = lon_hi + 1
lat_hi = lat_hi + 1
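    # e.g. rows 2..4 inclusive require mp[2:5, :], hence the +1 above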
# Get OUTMAP
#
outmap = mp[lat_lo:lat_hi, lon_lo:lon_hi]
    if diag:
        #
        # Find min, max, mean, median
        #
        # np.nan never compares equal, so test NaN nulls with isnan
        wdnn = np.where(~np.isnan(outmap)) if np.isnan(null) else np.where(outmap != null)
        if len(wdnn[0]) == 0:
            print(prom + 'WARNING: all data is null = {}'.format(null))
            print(prom + 'Exiting...')
            sys.stdout.flush()
            sys.exit()
        dmin = np.amin(outmap[wdnn])
        dmax = np.amax(outmap[wdnn])
        dmea = np.mean(outmap[wdnn])
        dmed = np.median(outmap[wdnn])
    #
    # Get output lat and lon arrays
    #
    # Determine minimum and maximum lats and lons from indices
    #
    minlon = lon_lo * blon + extent[0] + blon / 2.
    maxlon = lon_hi * blon + extent[0] + blon / 2.
    minlat = lat_lo * blat + extent[1] + blat / 2.
    maxlat = lat_hi * blat + extent[1] + blat / 2.
    #
    # How many elements will be in the arrays
    #
    nlons = int((maxlon - minlon) / blon)
    nlats = int((maxlat - minlat) / blat)
    #
    # Form arrays
    #
    lon_array = np.arange(nlons) * blon + minlon + blon/2
    lat_array = np.arange(nlats) * blat + minlat + blat/2
    #limit = [lon_lo*blon+blon/2,
    #         lat_lo*blat-88.+blat/2,
    #         lon_hi * blon + blon / 2,
    #         lat_hi * blat - 88. + blat / 2]
    #
    # Diagnostic output
    #
    if diag:
        print('------------------------')
        print('Map information:')
        if atype is str:
            print('Map file: {}'.format(inmap))
        print('NLAT: {}'.format(str(nlat)))
        print('NLON: {}'.format(str(nlon)))
        if atype is str:
            print('LHEAD: {}'.format(str(lhead)))
        print('Values at first 5 longitudes starting from 0:')
        print('Northernmost:')
        print(str(mp[nlat-1, 0:5]))
        print('Southernmost:')
        print(str(mp[0, 0:5]))
        print("------------------------")
        print("Desired subsection:")
        print("LLAT:\t{}".format(str(llat)))
        print("HLAT:\t{}".format(str(hlat)))
        print("LLON:\t{}".format(str(llon)))
        print("HLON:\t{}".format(str(hlon)))
        print("------------------------")
        print("Extracted data:")
        print("\tMap Pixel:\tLat/Lon:")
        print("lat_lo:\t{},{}".format(str(lat_lo), str(minlat)))
        print("lat_hi:\t{},{}".format(str(lat_hi), str(maxlat)))
        print("lon_lo:\t{},{}".format(str(lon_lo), str(minlon)))
        print("lon_hi:\t{},{}".format(str(lon_hi), str(maxlon)))
        print("minimum:\t{}".format(dmin))
        print("maximum:\t{}".format(dmax))
        print("mean:\t{}".format(dmea))
        print("median:\t{}".format(dmed))
        print("------------------------")
        print("Output map data array:")
        print('{}:\t{}'.format(type(outmap), np.shape(outmap)))
        print("------------------------")
        sys.stdout.flush()
return outmap, blat, blon, lat_array, lon_array
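# Hedged usage sketch (synthetic inputs, not from the original source):
# extract a 10x10 degree box from a 1-degree-per-bin global map.
#
#     demo_map = np.random.rand(180, 360)
#     sub, blat, blon, lat_arr, lon_arr = mapextract(
#         demo_map, limit=[120., -10., 130., 0.])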
def getmap(dim, data, lat, lon, blat, blon, null=np.nan, limit=np.array([0., -90., 360., 90.]), fp=0.0):
st = dt.now()
# #############################################################################
# mapp.pro
#
# Written 1998 Mar 05, Mike Mellon
# Updated 1998 Apr 20, Mike Mellon, to improve speed
# Updated 2011 Dec 05, Than Putzig, to allow non-global mapping
# Updated 2014 Feb 24, Than Putzig, to allow rectangular bins
# Updated 2014 Feb 26, IDL Version 1: Than Putzig, add use of footprints
# Updated 2015 Sep 23, IDL Version 1: Than Putzig, change fix to long for longitude
# Updated 2017 Sep 01, IDL Version 2: Than Putzig, rename to mapp from map (IDL8.x)
# Updated 2017 Sep 29, Python Version 1: Matt Perry, ported to Python 3+ from mapp.pro
#
# This program is intended to be used to bin data into a map. It accepts
# an array of data points and co-registered latitudes and longitudes. From
# this it creates a map of the desired bin size and puts the data into those
# bins, averaging as needed. If there is no data in a bin, its value is set to NULL.
# As of version 1, a footprint for the data may be specified that will cause
# data points to be mapped to more than one bin where the footprint exceeds
# the bin size (typically more often at high latitudes). For compatibility
# with prior versions, the default footprint size is zero.
# #############################################################################
vers = '1'
prom = '[GET_MAP]: ' # Errors prompt
d2r = np.pi / 180. # degree to radians
eflag = 0 # Preset error flag
ndat = len(data) # Get size of data array
    if ndat != len(lat):
        eflag = 1
        print(prom + "DATA and LATITUDE dimensions are not the same")
        print(prom + "DATA and LATITUDE must be co-registered")
        sys.stdout.flush()
    if ndat != len(lon):
        eflag = 1
        print(prom + "DATA and LONGITUDE dimensions are not the same")
        print(prom + "DATA and LONGITUDE must be co-registered")
        sys.stdout.flush()
    if blat <= 0 or blon <= 0:
        eflag = 1
        print(prom + "bin size(s) must be greater than zero")
        sys.stdout.flush()
#
# Check LIMIT ranges (default to global)
#
if type(limit) is not np.ndarray:
limit = np.array(limit, float)
if len(limit) != 4:
limit = np.array([0, -90, 360, 90])
minlon = limit[0]
minlat = limit[1]
maxlon = limit[2]
maxlat = limit[3]
longlob = (limit[0] == 0. and limit[2] == 360.)
    if minlon > 360. or minlon < 0. or maxlon > 360. \
            or maxlon < 0. or minlon > maxlon:
        print(prom + "LIMIT longitude value out of range (0-360, minlon < maxlon)")
        sys.stdout.flush()
        eflag = 1
    if minlat > 90. or minlat < -90. or maxlat > 90. \
            or maxlat < -90. or minlat > maxlat:
        print(prom + "LIMIT latitude value out of range (-90-90, minlat < maxlat)")
        sys.stdout.flush()
        eflag = 1
    if eflag == 1:
        print(prom + "Exiting due to input errors")
        sys.stdout.flush()
        return -1, None, None, None, None
###############################################################################
# Define needed variables: I need to create a map array with defined
# longitude and latitudes in order to place co-registered data-lat-lon
# into a mapped format.
###############################################################################
req = 3396. # Radius of Mars at equator
bn = np.array([blon, blat])
    if type(fp) is not np.ndarray and type(fp) is not list:
        fp = float(fp)
        fp = np.array([fp, fp])
    elif type(fp) is list:
        fp = np.array(fp)
dhfe = bn/2 if np.amin(fp) == 0.0 else fp*180/(np.pi*req)/2 # deg/0.5 footprint @ equat
nlon = int((maxlon - minlon)/bn[0]) # Number of map longitudes
nlat = int((maxlat - minlat)/bn[1]) # Number of map latitudes
mout = np.zeros([int(nlat), int(nlon)], float) # Initialize output map array
nmap = np.zeros([int(nlat), int(nlon)],int) # Initialize data density map array
mout_new = np.zeros([int(nlat), int(nlon)], float) # Initialize output map array
nmap_new = np.zeros([int(nlat), int(nlon)],int) # Initialize data density map array
mlon = np.arange(int(nlon)) * bn[0] + minlon + bn[0]/2 # lon array centered on map bins
mlat = np.arange(int(nlat)) * bn[1] + minlat + bn[1]/2 # Lat array centered on map bins
###############################################################################
# Next, scan through all the longitudes and latitudes in MOUT, locate all
# the data points falling within a bin and sum them, averaging at the end.
# The streamlined way is to find all the points that fall in a zone of
# bins (all the same latitude), save that subset of longitudes and data,
# then search only that subset for appropriate longitude bins. It turns out to
# be more than 10 times faster that way (at least when Mellon developed this
# method with longitudes in the outer loop and latitudes in the inner loop).
###############################################################################
slat = np.amax(np.append(dhfe[1], bn/2)) #Lat search distance in 1/2 bin or 1/2 footprint
    err_new = 0    # Count of samples that failed to map
dhfl_new = dhfe[0]/(np.cos(mlat*d2r)) #Degs/half footprint at this lat
n_new = np.array(dhfl_new*2/bn[0], int) # no. lon bins to fill
n_new[n_new < 1] = 1
n_new[n_new > int(nlon)] = int(nlon)
    for ilat in range(0, int(nlat)):  # Loop over latitude bins
#
# Test for largest latitude. Elsewhere, include bin lower boundary and stay
# less than upper boundary
#
if ilat != nlat-1: # Test for largest latitude
w1 = np.where((lat >= mlat[ilat] - slat) & (lat < mlat[ilat] + slat))
else:
#
# At largest latitude, include upper boundary (e.g. 90 N if global)
#
w1 = np.where((lat >= mlat[ilat] - slat) & (lat <= mlat[ilat] + slat))
        if len(w1[0]) != 0:                     # Trap no points here
            lon2 = lon[w1]                      # Save longitude subset
            data2 = data[w1]                    # Save data subset
            lons_new = np.arange(n_new[ilat]) - float(n_new[ilat] - 1.)/2.
            #
            # One row of candidate bin-center longitudes per data point
            #
            lonz_new = lons_new*bn[0] + lon2.reshape([len(lon2), 1])
            ilon_new = np.array((lonz_new - limit[0])/bn[0], int)
            #
            # Repeat each data value across its footprint columns so the data
            # stays co-registered with the longitude indices
            #
            data2d = np.broadcast_to(data2.reshape([len(data2), 1]), np.shape(ilon_new))
            if longlob:
                ilon_new = np.mod(ilon_new + nlon, nlon)
            else:
                #
                # Keep only indices inside the map, along with their data values
                #
                valid = (ilon_new < int(nlon)) & (ilon_new >= 0)
                data2d = data2d[valid]
                ilon_new = ilon_new[valid]
            jlat_new = np.zeros(np.shape(ilon_new), int) + ilat
            try:
                #
                # np.add.at accumulates correctly where indices repeat (unlike
                # fancy-indexed assignment); summing the data here means the
                # division by NMAP below yields the bin average
                #
                np.add.at(mout_new, (jlat_new, ilon_new), data2d)
                np.add.at(nmap_new, (jlat_new, ilon_new), 1)
            except (RuntimeError, TypeError, NameError, IndexError):
                err_new += 1
    nmap = nmap_new
    mout = mout_new
    if err_new != 0:
        print(prom + 'WARNING: {} samples not included in map'.format(err_new))
sys.stdout.flush()
ind_null = np.where(nmap == 0)
if len(ind_null[0]) > 0:
mout[ind_null] = null # set zero-count bins to NULL
w = np.where(nmap != 0)
    if len(w[0]) > 0:
mout[w] = mout[w]/nmap[w] #Average map values
print(prom + "Took {} to complete".format(dt.now() - st))
return mout, mlat, mlon, nmap, ind_null
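# Hedged usage sketch (synthetic inputs, not from the original source): bin
# 10000 random samples into a 2-degree global map and average per bin.
#
#     npts = 10000
#     demo_lat = np.random.uniform(-90., 90., npts)
#     demo_lon = np.random.uniform(0., 360., npts)
#     demo_val = np.random.rand(npts)
#     mout, mlat, mlon, nmap, ind_null = getmap(
#         0, demo_val, demo_lat, demo_lon, 2., 2.)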
def maparea(mp, rng=False, limit=np.array([0., -90., 360., 90]), infile=False, lats=False, null=np.nan):
#############################################################################
# maparea.pro
#
# Modifications history:
# 2003 jun 05 Than Putzig, created from mapbin.pro
# 2007 Feb 09 Than Putzig, updated to allow read from extant array
# 2011 Nov 08 Than Putzig, to output mean values of map
# 2014 Feb 27 Than Putzig, to allow non-global maps
# 2017 Sep 29 Matt Perry, ported to python 3+
# 2017 Oct 27 Matt Perry, updated area calculation for quicker processing
#
# Reads a map from a file or an array and determines the area of
# coverage for values in a given inclusive range over a specified latitude
# range.
#
#############################################################################
prom = "[MAPAREA]: "
    if rng is False:
        print(prom + "You must specify a range")
        rap = -1
        sys.stdout.flush()
        return rap
print(prom + "Calculating coverage percentage")
sys.stdout.flush()
if len(limit) != 4:
limit = np.array([0., -90., 360., 90])
else:
limit = np.array(limit)
    if lats is False:
        lats = np.array([limit[1], limit[3]])
    lats = np.atleast_1d(lats)
    if len(lats) != 2:
        lats = np.array([lats[0], lats[0]])
#
# Set variable name
#
# Restore previous map and create corresponding lat/lon maps
#
if type(infile) is str:
if os.path.isfile(infile):
print(prom + "This function is not yet available.")
sys.stdout.flush()
sys.exit()
else:
print(prom + "File does not exist")
print(prom + "This function is not yet available.")
sys.stdout.flush()
r = 3396. # mean equatorial radius of Mars
a = 4.0*np.pi*np.power(r,2.)
print(prom + "Nominal surface area of Mars is {} sq. km..".format(str(a)))
sys.stdout.flush()
sz = np.shape(mp)
nlon = sz[1]
nlat = sz[0]
binlon = (limit[2] - limit[0])/nlon
binlat = (limit[3] - limit[1])/nlat
jmin = int((lats[0] - limit[1])/binlat)
jmax = int((lats[1] - limit[1])/binlat)
area = 0.0
atot = 0.0
valu = 0.0
vtot = 0.0
t_start = dt.now()
for jj in range(jmin, jmax):
binarea = binlon * binlat * np.cos((jj * binlat + limit[1] + binlat / 2.) * np.pi/180.) * \
np.power((2. * np.pi * r / 360.), 2.)
bins = np.nansum(mp[jj,:]) # Get number of filled bins
area = area + binarea*bins # Area of filled bins
        atot = atot + binarea*len(mp[jj,:])  # Total area of latitude row
vtot = vtot + binarea*bins
#
# Print statements
#
        if jmax >= 10 and np.mod(jj, int(jmax/10)) == 0:
print(prom + "Area S of latitude {} is {}".format(str(jj*binlat+limit[1]), str(area)))
sys.stdout.flush()
if area > 0:
print(prom + "Mean value S of latitude {} is {}".format(str(jj * binlat + limit[1]), str(vtot/area)))
sys.stdout.flush()
rap = 100*area/atot
#
# Print Results
#
print(prom + 'Area of data values between {} and {} is {} sq km'.format(rng[0], rng[1], area))
print(prom + 'Total area between latitudes {} and {} is {} sq km'.format(lats[0], lats[1], atot))
print(prom + 'Relative area is {0:.2f}%'.format(rap))
print(prom + 'Mean value scaled to bin sizes is {}'.format(vtot/area))
print(prom + '... done.')
print(prom + 'Total Time {}'.format(dt.now() - t_start))
sys.stdout.flush()
return rap
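# Hedged usage sketch (synthetic inputs, not from the original source):
# percent coverage of the 60N-90N zone in a 1s/NaN coverage map.
#
#     cov = np.where(np.random.rand(180, 360) > 0.5, 1., np.nan)
#     pct = maparea(cov, rng=np.array([0., 2.]), lats=np.array([60., 90.]))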
def custom_color_tables(table):
color_dir = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/color_tables/'
#
# Custom color table
nlat = 176
avelat = 176
#
if table == 'MOLA':
name = 'MOLA_standard_cmap2.npy'
cmap = np.load(color_dir + name)
        cmap = ListedColormap(cmap, name=name, N=None)
else:
cmap = {'red': ((0.0, 0.0, 0.0),
(0.10, 0.0, 0.0),
(0.23, 0.0, 0.0),
(0.33, 0.0, 0.0),
(0.4, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.70, 0.5, 0.5),
(1.00, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.10, 0.0, 0.0),
(0.23, 1.0, 1.0),
(0.33, 1.0, 1.0),
(0.4, 1.0, 1.0),
(0.50, 0.0, 0.0),
(0.70, 0.30, 0.30),
(1.00, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.10, 1.0, 1.0),
(0.23, 1.0, 1.0),
(0.33, 0.0, 0.0),
(0.4, 0.0, 0.0),
(0.50, 0.0, 0.0),
(0.70, 0.25, 0.25),
(1.00, 1.0, 1.0))}
cmap = LinearSegmentedColormap('TI',cmap,256)
return cmap
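# Hedged usage sketch (not from the original source): any name other than
# 'MOLA' falls through to the built-in thermal-inertia ('TI') ramp.
#
#     ti_cmap = custom_color_tables('TI')
#     mola_cmap = custom_color_tables('MOLA')   # needs the .npy table on disk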
def plotsize(limit, mres, tclat):
#
# Calculate the plot size in inches
#
slat = (limit[3] - limit[1]) * mres / 75 / 2.54
if tclat == "":
slon = (limit[2] - limit[0]) * mres / 75 / 2.54
xsize = np.max([slon, 6.5]) + 2
ysize = slat * (xsize - 2) / slon + 3
else:
xsize = np.max([slat, 3.25]) * 2 + 2
ysize = xsize + 1
return xsize, ysize
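# Hedged usage sketch (not from the original source): figure size in inches
# for a global map at 4 pixels/degree with no polar center latitude.
#
#     xsize, ysize = plotsize([0., -90., 360., 90.], 4, "")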
def polar_stere(lon_w, lon_e, lat_s, lat_n, **kwargs):
'''Returns a Basemap object (NPS/SPS) focused in a region.
lon_w, lon_e, lat_s, lat_n -- Graphic limits in geographical coordinates.
W and S directions are negative.
    **kwargs -- Additional arguments for Basemap object.
'''
lon_0 = lon_w + (lon_e - lon_w) / 2.
ref = lat_s if abs(lat_s) > abs(lat_n) else lat_n
lat_0 = np.copysign(90., ref)
proj = 'npstere' if lat_0 > 0 else 'spstere'
prj = Basemap(projection=proj, lon_0=lon_0, lat_0=lat_0,
boundinglat=0, resolution='c')
lons = [lon_w, lon_e, lon_w, lon_e, lon_0, lon_0]
lats = [lat_s, lat_s, lat_n, lat_n, lat_s, lat_n]
x, y = prj(lons, lats)
ll_lon, ll_lat = prj(min(x), min(y), inverse=True)
ur_lon, ur_lat = prj(max(x), max(y), inverse=True)
return Basemap(projection='stere', lat_0=lat_0, lon_0=lon_0,
llcrnrlon=ll_lon, llcrnrlat=ll_lat,
urcrnrlon=ur_lon, urcrnrlat=ur_lat, **kwargs)
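# Hedged usage sketch (not from the original source): a north polar
# stereographic view of all longitudes poleward of 60N.
#
#     bmap = polar_stere(0., 360., 60., 90., resolution=None)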
import os, socket
import MySQLdb as SQL  # MySQL Interface Library
import _mysql_exceptions
def dbreadconf(config_file):
prom = "[DB_READ_CONFIG]: "
if os.path.isfile(config_file):
        with open(config_file) as f:
            temp = f.read().splitlines()
        user = str(temp[0]).strip()
        password = str(temp[1]).strip()
        database = str(temp[2]).strip()
        return user, password, database
else:
print(prom + "Database configuration file not found.")
print(prom + "Please ensure the path to the file is correct and try again.")
print(prom + "Exiting...")
return
def dbconnect(user, password, database):
prom = "[DB_CONNECT]: "
hname = socket.gethostname()
if hname == 'hachiwara':
host = 'localhost'
else:
host = 'hachiwara'
# This comment is to test something cool
try:
db = SQL.connect(host=host, user=user, passwd=password, db=database)
return db
except SQL.Error as err:
print(prom + "Error in Connection")
print(err)
print(prom + "Exiting")
return
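# Hedged usage sketch (not from the original source): read credentials from
# the config file, then open the connection.
#
#     user, password, database = dbreadconf('/path/to/dbconfig.txt')  # hypothetical path
#     db = dbconnect(user, password, database)
#     cur = db.cursor()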
def SHARAD_Coverage(mapname, orng=np.array([829, 100000]), srng=np.array([0., -88., 360., 88.]),
                    orbitids='', orbittrig=False, roll=99., rcri='<', nomap=0, foldmap=0,
                    elevmap=0, replot=0, update=0, clat=0, cset=0, shresc=3.0, shresi=0.750,
                    backgrounddata='MOLA'):
#
# Set Directories
#
config_file = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/dbconfig.txt'
outDir = '/home/mperry/Applications/SHARAD_Coverage/output/'
#
# Global Variables
#
global warn1, warn2, warn3, warn4
#
# Constants
#
startTime = dt.now()
missions = ['PSP', 'ESP', 'EM1', 'EM2', 'EM3', 'EM4', 'EM5']
prog = "SHARAD_coverage"
vers = 0.1
null = np.nan
    # shresc and shresi (SHARAD crossline/inline resolution in km) are taken
    # from the function arguments; do not overwrite them here
erad = 3396 # Mars equatorial radius in km
    d2r = np.pi / 180. # Degrees to radians
escl = erad * d2r #Equatorial scale in km/deg
kpd = 59. # kilometers per degree on Mars
shres = int(round(escl/shresc)) # SHARAD res. in pixels/degree at the equator (rounded to nearest pixel)
latbuf = 50 # 50 km latitude buffer for searches (per conversation with Than)
#
# Check input variables
#
mapbase = str(mapname)
if not os.path.isdir(outDir):
os.mkdir(outDir)
log_fname = outDir + mapbase + ".log"
sys.stdout = open(log_fname, 'w')
sys.stderr = sys.stdout
    bg_data = backgrounddata
    # NOTE: `args` is assumed to be a module-level argparse namespace
    if args.update is not None:
        update = True
        f_update = args.update[0]
if not os.path.isfile(f_update):
print("ERROR: File with which to update does not exist in the current path.")
print("Please move to file to the local directory or specify the file's full path")
print("Exiting")
sys.stdout.flush()
exit()
return
| gpl-3.0 |