code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
from numpy import random
import numpy as np
from pandas.compat import lrange, lzip, u
from pandas import (compat, DataFrame, Series, Index, MultiIndex,
date_range, isnull)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
from pandas.core.common import PerformanceWarning
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSelectReindex(tm.TestCase, TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
_multiprocess_can_split_ = True
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
self.assertEqual(obj.index.name, 'first')
self.assertEqual(obj.columns.name, 'second')
self.assertEqual(list(df.columns), ['d', 'e', 'f'])
self.assertRaises(ValueError, df.drop, ['g'])
self.assertRaises(ValueError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
self.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
self.assert_index_equal(dropped.columns, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.ix[[1, 2], :])
self.assertRaises(ValueError, simple.drop, 5)
self.assertRaises(ValueError, simple.drop, 'C', 1)
self.assertRaises(ValueError, simple.drop, [1, 5])
self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.ix[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
self.assertTrue(lexsorted_df.columns.is_lexsorted())
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
self.assertFalse(not_lexsorted_df.columns.is_lexsorted())
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(newFrame):
self.assertTrue(tm.equalContents(series.index, newFrame.index))
emptyFrame = self.frame.reindex(Index([]))
self.assertEqual(len(emptyFrame.index), 0)
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
self.assertTrue(np.isnan(self.frame[col][idx]))
else:
self.assertEqual(val, self.frame[col][idx])
else:
self.assertTrue(np.isnan(val))
for col, series in compat.iteritems(nonContigFrame):
self.assertTrue(tm.equalContents(series.index,
nonContigFrame.index))
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
self.assertIs(newFrame.index, self.frame.index)
# length zero
newFrame = self.frame.reindex([])
self.assertTrue(newFrame.empty)
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
self.assertEqual(len(newFrame.index), len(self.frame.index))
self.assertEqual(len(newFrame.columns), len(self.frame.columns))
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
self.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
self.assertFalse(result is self.frame)
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
self.assertEqual(df.index.name, 'iname')
df = df.reindex(Index(np.arange(10), name='tmpname'))
self.assertEqual(df.index.name, 'tmpname')
s = Series(random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
self.assertEqual(df.columns.name, 'iname')
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
self.assertEqual(smaller['A'].dtype, np.int64)
bigger = smaller.reindex(self.intframe.index)
self.assertEqual(bigger['A'].dtype, np.float64)
smaller = self.intframe.reindex(columns=['A', 'B'])
self.assertEqual(smaller['A'].dtype, np.int64)
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
newFrame = self.frame.reindex(columns=['A', 'B', 'E'])
assert_series_equal(newFrame['B'], self.frame['B'])
self.assertTrue(np.isnan(newFrame['E']).all())
self.assertNotIn('C', newFrame)
# length zero
newFrame = self.frame.reindex(columns=[])
self.assertTrue(newFrame.empty)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
self.assertEqual(index_freq, both_freq)
self.assertEqual(index_freq, seq_freq)
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
self.assertTrue(np.isnan(result.values[-5:]).all())
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))
def test_align(self):
af, bf = self.frame.align(self.frame)
self.assertIsNot(af._data, self.frame._data)
af, bf = self.frame.align(self.frame, copy=False)
self.assertIs(af._data, self.frame._data)
# axis = 0
other = self.frame.ix[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
self.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='right', axis=0)
self.assert_index_equal(bf.columns, other.columns)
self.assert_index_equal(bf.index, other.index)
self.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.ix[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
self.assert_index_equal(bf.columns, self.frame.columns)
self.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
self.assertTrue((diff_a_vals == -1).all())
af, bf = self.frame.align(other, join='inner', axis=1)
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
self.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=None)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,
method=None, fill_value=0)
self.assert_index_equal(bf.index, Index([]))
# try to align dataframe to series along bad axis
self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],
join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
self.assertTrue(isinstance(right, Series))
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {}
for c in self.frame.columns:
expected[c] = s
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(right, expected)
# GH 9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
def test_align_fill_method_inner(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('inner', meth, ax, fax)
def test_align_fill_method_outer(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('outer', meth, ax, fax)
def test_align_fill_method_left(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('left', meth, ax, fax)
def test_align_fill_method_right(self):
for meth in ['pad', 'bfill']:
for ax in [0, 1, None]:
for fax in [0, 1]:
self._check_align_fill('right', meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.ix[0:4, :10]
right = self.frame.ix[2:, 6:]
empty = self.frame.ix[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# items
filtered = self.frame.filter(['A', 'B', 'E'])
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
self.assertEqual(len(filtered.columns), 2)
self.assertNotIn('E', filtered)
# other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
self.assertEqual(len(filtered.columns), 2)
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
assert_frame_equal(filtered, expected)
# pass in None
with assertRaisesRegexp(TypeError, 'Must pass'):
self.frame.filter(items=None)
# objects
filtered = self.mixed_frame.filter(like='foo')
self.assertIn('foo', filtered)
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
self.assertTrue('C' in filtered)
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
self.assertEqual(len(filtered.columns), 2)
self.assertIn('AA', filtered)
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
f = lambda x: x.weekday() == 2
result = self.tsframe.select(f, axis=0)
expected = self.tsframe.reindex(
index=self.tsframe.index[[f(x) for x in self.tsframe.index]])
assert_frame_equal(result, expected)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
# TODO should reindex check_names?
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# neg indicies
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
self.assertRaises(IndexError, df.take, [3, 1, 2, 30], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, -31], axis=0)
self.assertRaises(IndexError, df.take, [3, 1, 2, 5], axis=1)
self.assertRaises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# neg indicies
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.ix[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[0][1]))
reindexed = frame.reindex(columns=lrange(3))
self.assertEqual(reindexed.values.dtype, np.object_)
self.assertTrue(isnull(reindexed[1]).all())
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
self.assertIn('foo', reindexed)
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
self.assertNotIn('foo', reindexed)
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
self.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
self.assertEqual(smaller['E'].dtype, np.float64)
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
newFrame = self.frame.reindex_axis(cols, axis=1)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(4), lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(lrange(2), lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
| [
"numpy.random.rand",
"pandas.Index",
"pandas.util.testing.assertRaisesRegexp",
"pandas.MultiIndex.from_tuples",
"numpy.arange",
"pandas.date_range",
"pandas.to_datetime",
"pandas.util.testing.assert_frame_equal",
"datetime.datetime",
"pandas.DataFrame",
"pandas.util.testing.assert_produces_warni... | [((842, 939), 'pandas.DataFrame', 'DataFrame', (['[[1, 2, 3], [3, 4, 5], [5, 6, 7]]'], {'index': "['a', 'b', 'c']", 'columns': "['d', 'e', 'f']"}), "([[1, 2, 3], [3, 4, 5], [5, 6, 7]], index=['a', 'b', 'c'], columns\n =['d', 'e', 'f'])\n", (851, 939), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((1734, 1770), 'pandas.Index', 'Index', (["['a', 'b', 'c']"], {'name': '"""first"""'}), "(['a', 'b', 'c'], name='first')\n", (1739, 1770), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((1903, 1934), 'pandas.Index', 'Index', (["['a', 'c']"], {'name': '"""first"""'}), "(['a', 'c'], name='first')\n", (1908, 1934), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((2070, 2107), 'pandas.Index', 'Index', (["['d', 'e', 'f']"], {'name': '"""second"""'}), "(['d', 'e', 'f'], name='second')\n", (2075, 2107), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((2250, 2282), 'pandas.Index', 'Index', (["['e', 'f']"], {'name': '"""second"""'}), "(['e', 'f'], name='second')\n", (2255, 2282), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((2562, 2592), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (['tuples'], {}), '(tuples)\n', (2584, 2592), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((2779, 2828), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 2, 3, 4], 'B': [0, 1, 2, 3]}"], {}), "({'A': [1, 2, 3, 4], 'B': [0, 1, 2, 3]})\n", (2788, 2828), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((4587, 4619), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'expected'], {}), '(df, expected)\n', (4605, 4619), False, 'from pandas.util.testing import assert_series_equal, 
assert_frame_equal, assertRaisesRegexp\n'), ((4753, 4838), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('a', ''), ('b1', 'c1'), ('b2', 'c2')]"], {'names': "['b', 'c']"}), "([('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b',\n 'c'])\n", (4775, 4838), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((4871, 4915), 'pandas.DataFrame', 'DataFrame', (['[[1, 3, 4]]'], {'columns': 'lexsorted_mi'}), '([[1, 3, 4]], columns=lexsorted_mi)\n', (4880, 4915), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((5048, 5138), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['a', 'b', 'c', 'd']", 'data': "[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]]"}), "(columns=['a', 'b', 'c', 'd'], data=[[1, 'b1', 'c1', 3], [1, 'b2',\n 'c2', 4]])\n", (5057, 5138), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((5490, 5543), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['lexsorted_df', 'not_lexsorted_df'], {}), '(lexsorted_df, not_lexsorted_df)\n', (5511, 5543), True, 'import pandas.util.testing as tm\n'), ((5721, 5760), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (5742, 5760), True, 'import pandas.util.testing as tm\n'), ((5869, 5923), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['a', 'b']", 'data': '[[1, 11], [0, 22]]'}), "(columns=['a', 'b'], data=[[1, 11], [0, 22]])\n", (5878, 5923), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((5970, 6018), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('a', ''), ('c', 'c1')]"], {}), "([('a', ''), ('c', 'c1')])\n", (5992, 6018), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((6033, 6084), 'pandas.DataFrame', 'DataFrame', ([], {'columns': 'columns', 'data': '[[1, 33], [0, 
44]]'}), '(columns=columns, data=[[1, 33], [0, 44]])\n', (6042, 6084), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((6163, 6222), 'pandas.DataFrame', 'DataFrame', ([], {'columns': 'columns', 'data': '[[1, 11, 33], [0, 22, 44]]'}), '(columns=columns, data=[[1, 11, 33], [0, 22, 44]])\n', (6172, 6222), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((6333, 6372), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (6354, 6372), True, 'import pandas.util.testing as tm\n'), ((6489, 6554), 'pandas.DataFrame', 'DataFrame', ([], {'columns': 'columns', 'data': '[[1, 11, 0, 44], [0, 22, 1, 33]]'}), '(columns=columns, data=[[1, 11, 0, 44], [0, 22, 1, 33]])\n', (6498, 6554), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((6689, 6728), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (6710, 6728), True, 'import pandas.util.testing as tm\n'), ((7260, 7286), 'pandas.compat.iteritems', 'compat.iteritems', (['newFrame'], {}), '(newFrame)\n', (7276, 7286), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((8045, 8077), 'pandas.compat.iteritems', 'compat.iteritems', (['nonContigFrame'], {}), '(nonContigFrame)\n', (8061, 8077), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((9127, 9165), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'self.frame'], {}), '(result, self.frame)\n', (9145, 9165), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((9259, 9360), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 2], [3, 5], [7, 11], [9, 23]]'], {'index': '[2, np.nan, 1, 5]', 'columns': "['joe', 'jim']"}), "([[1, 2], [3, 5], [7, 
11], [9, 23]], index=[2, np.nan, 1, 5],\n columns=['joe', 'jim'])\n", (9271, 9360), True, 'import pandas as pd\n'), ((9695, 9826), 'pandas.DataFrame', 'pd.DataFrame', (["{'other': ['a', 'b', np.nan, 'c'], 'date': ['2015-03-22', np.nan,\n '2012-01-08', np.nan], 'amount': [2, 3, 4, 5]}"], {}), "({'other': ['a', 'b', np.nan, 'c'], 'date': ['2015-03-22', np.\n nan, '2012-01-08', np.nan], 'amount': [2, 3, 4, 5]})\n", (9707, 9826), True, 'import pandas as pd\n'), ((9934, 9957), 'pandas.to_datetime', 'pd.to_datetime', (['df.date'], {}), '(df.date)\n', (9948, 9957), True, 'import pandas as pd\n'), ((10185, 10216), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['left', 'right'], {}), '(left, right)\n', (10203, 10216), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((11520, 11571), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (["newFrame['B']", "self.frame['B']"], {}), "(newFrame['B'], self.frame['B'])\n", (11539, 11571), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((12146, 12194), 'pandas.date_range', 'date_range', (['"""2012-01-01"""', '"""2012-01-03"""'], {'freq': '"""d"""'}), "('2012-01-01', '2012-01-03', freq='d')\n", (12156, 12194), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((12878, 12914), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (12896, 12914), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((13057, 13093), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13075, 13093), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((13217, 13253), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', 
(['result', 'expected'], {}), '(result, expected)\n', (13235, 13253), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((13385, 13421), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13403, 13421), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((13574, 13610), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13592, 13610), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((13746, 13782), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13764, 13782), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((13947, 13983), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13965, 13983), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((14093, 14112), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (14108, 14112), True, 'import numpy as np\n'), ((14126, 14178), 'pandas.DataFrame', 'DataFrame', (['arr'], {'index': '[1, 2, 3, 4, 5, 1, 2, 3, 4, 5]'}), '(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])\n', (14135, 14178), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((14347, 14383), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (14365, 14383), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((17890, 17941), 'pandas.util.testing.assert_index_equal', 'tm.assert_index_equal', (['left.index', 'self.frame.index'], {}), '(left.index, 
self.frame.index)\n', (17911, 17941), True, 'import pandas.util.testing as tm\n'), ((17950, 18002), 'pandas.util.testing.assert_index_equal', 'tm.assert_index_equal', (['right.index', 'self.frame.index'], {}), '(right.index, self.frame.index)\n', (17971, 18002), True, 'import pandas.util.testing as tm\n'), ((18123, 18174), 'pandas.util.testing.assert_index_equal', 'tm.assert_index_equal', (['left.index', 'self.frame.index'], {}), '(left.index, self.frame.index)\n', (18144, 18174), True, 'import pandas.util.testing as tm\n'), ((18281, 18352), 'pandas.DataFrame', 'DataFrame', (['expected'], {'index': 'self.frame.index', 'columns': 'self.frame.columns'}), '(expected, index=self.frame.index, columns=self.frame.columns)\n', (18290, 18352), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((18390, 18425), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['right', 'expected'], {}), '(right, expected)\n', (18408, 18425), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((18458, 18501), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 3], 'b': [4, 5, 6]}"], {}), "({'a': [1, 2, 3], 'b': [4, 5, 6]})\n", (18467, 18501), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((18555, 18605), 'pandas.DataFrame', 'DataFrame', (['[[2, 5]]'], {'index': '[1]', 'columns': "['a', 'b']"}), "([[2, 5]], index=[1], columns=['a', 'b'])\n", (18564, 18605), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((18614, 18650), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (18632, 18650), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((18714, 18757), 'pandas.DataFrame', 'DataFrame', (["{'a': [0, 2, 0], 'b': [0, 5, 0]}"], {}), "({'a': [0, 2, 0], 'b': [0, 5, 
0]})\n", (18723, 18757), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((18766, 18802), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (18784, 18802), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((19604, 19630), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['aa', 'ea'], {}), '(aa, ea)\n', (19622, 19630), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((19639, 19665), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ab', 'eb'], {}), '(ab, eb)\n', (19657, 19665), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((21856, 21883), 'numpy.ones', 'np.ones', (['(10, 1)'], {'dtype': 'int'}), '((10, 1), dtype=int)\n', (21863, 21883), True, 'import numpy as np\n'), ((21899, 21911), 'pandas.DataFrame', 'DataFrame', (['X'], {}), '(X)\n', (21908, 21911), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((22057, 22093), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (22075, 22093), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((22726, 22757), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expl', 'res1l'], {}), '(expl, res1l)\n', (22744, 22757), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((22766, 22797), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expl', 'res2r'], {}), '(expl, res2r)\n', (22784, 22797), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((22813, 22871), 'pandas.DataFrame', 'pd.DataFrame', (['([0, 0, 1, 
1, np.nan, np.nan] * 2)'], {'index': 'midx'}), '([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)\n', (22825, 22871), True, 'import pandas as pd\n'), ((22880, 22911), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expr', 'res1r'], {}), '(expr, res1r)\n', (22898, 22911), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((22920, 22951), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expr', 'res2l'], {}), '(expr, res2l)\n', (22938, 22951), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((23217, 23270), 'pandas.DataFrame', 'pd.DataFrame', (['[0, 1, 2, 3, 6, 7, 8, 9]'], {'index': 'exp_idx'}), '([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)\n', (23229, 23270), True, 'import pandas as pd\n'), ((23279, 23310), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expl', 'res1l'], {}), '(expl, res1l)\n', (23297, 23310), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((23319, 23350), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expl', 'res2r'], {}), '(expl, res2r)\n', (23337, 23350), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((23366, 23411), 'pandas.DataFrame', 'pd.DataFrame', (['([0, 0, 1, 1] * 2)'], {'index': 'exp_idx'}), '([0, 0, 1, 1] * 2, index=exp_idx)\n', (23378, 23411), True, 'import pandas as pd\n'), ((23420, 23451), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expr', 'res1r'], {}), '(expr, res1r)\n', (23438, 23451), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((23460, 23491), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expr', 'res2l'], {}), '(expr, res2l)\n', (23478, 23491), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, 
assertRaisesRegexp\n'), ((24063, 24096), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['res1', 'exp1'], {}), '(res1, exp1)\n', (24084, 24096), True, 'import pandas.util.testing as tm\n'), ((24105, 24139), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['res2', 'exp2'], {}), '(res2, exp2)\n', (24127, 24139), True, 'import pandas.util.testing as tm\n'), ((24207, 24241), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['res1', 'exp2'], {}), '(res1, exp2)\n', (24229, 24241), True, 'import pandas.util.testing as tm\n'), ((24250, 24283), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['res2', 'exp1'], {}), '(res2, exp1)\n', (24271, 24283), True, 'import pandas.util.testing as tm\n'), ((24806, 24844), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['filtered', 'expected'], {}), '(filtered, expected)\n', (24824, 24844), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((25106, 25165), 'pandas.DataFrame', 'DataFrame', (['(0.0)'], {'index': '[0, 1, 2]', 'columns': "[0, 1, '_A', '_B']"}), "(0.0, index=[0, 1, 2], columns=[0, 1, '_A', '_B'])\n", (25115, 25165), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((25336, 25399), 'pandas.DataFrame', 'DataFrame', (['(0.0)'], {'index': '[0, 1, 2]', 'columns': "['A1', 1, 'B', 2, 'C']"}), "(0.0, index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])\n", (25345, 25399), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((25557, 25595), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['filtered', 'expected'], {}), '(filtered, expected)\n', (25575, 25595), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((25616, 25673), 'pandas.DataFrame', 'DataFrame', (['(0.0)'], {'index': '[0, 1, 2]', 'columns': "[0, '0', 1, '1']"}), 
"(0.0, index=[0, 1, 2], columns=[0, '0', 1, '1'])\n", (25625, 25673), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((25770, 25808), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['filtered', 'expected'], {}), '(filtered, expected)\n', (25788, 25808), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((26538, 26616), 'pandas.DataFrame', 'DataFrame', (["{'aBBa': [1, 2], 'BBaBB': [1, 2], 'aCCa': [1, 2], 'aCCaBB': [1, 2]}"], {}), "({'aBBa': [1, 2], 'BBaBB': [1, 2], 'aCCa': [1, 2], 'aCCaBB': [1, 2]})\n", (26547, 26616), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((26792, 26823), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'exp'], {}), '(result, exp)\n', (26810, 26823), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((26875, 26886), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (26884, 26886), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((26930, 26963), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'empty'], {}), '(result, empty)\n', (26948, 26963), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((27015, 27048), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'empty'], {}), '(result, empty)\n', (27033, 27048), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((27287, 27323), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (27305, 27323), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((27505, 27560), 'pandas.util.testing.assert_frame_equal', 
'assert_frame_equal', (['result', 'expected'], {'check_names': '(False)'}), '(result, expected, check_names=False)\n', (27523, 27560), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((30812, 30834), 'pandas.Index', 'Index', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (30817, 30834), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((31328, 31370), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['reindexed1', 'reindexed2'], {}), '(reindexed1, reindexed2)\n', (31346, 31370), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((31537, 31579), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['reindexed1', 'reindexed2'], {}), '(reindexed1, reindexed2)\n', (31555, 31579), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((31789, 31829), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['newFrame', 'self.frame'], {}), '(newFrame, self.frame)\n', (31807, 31829), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((31882, 32012), 'pandas.DataFrame', 'DataFrame', (['[[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]]'], {'columns': "['a', 'b']", 'index': '[100.0, 101.0, np.nan, 102.0, 103.0]'}), "([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]], columns=['a',\n 'b'], index=[100.0, 101.0, np.nan, 102.0, 103.0])\n", (31891, 32012), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((32159, 32195), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (32177, 32195), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((32280, 32316), 'pandas.util.testing.assert_frame_equal', 
'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (32298, 32316), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((32401, 32437), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (32419, 32437), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((32647, 32683), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (32665, 32683), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((32870, 32906), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (32888, 32906), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((33093, 33129), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (33111, 33129), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((33345, 33381), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (33363, 33381), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((2617, 2638), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)'], {}), '(3, 4)\n', (2632, 2638), True, 'import numpy as np\n'), ((4151, 4176), 'pandas.Index', 'pd.Index', (["['X', 'Y', 'X']"], {}), "(['X', 'Y', 'X'])\n", (4159, 4176), True, 'import pandas as pd\n'), ((4442, 4464), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (4457, 4464), True, 'import numpy as np\n'), ((5608, 5654), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', 
(['PerformanceWarning'], {}), '(PerformanceWarning)\n', (5634, 5654), True, 'import pandas.util.testing as tm\n'), ((6236, 6275), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (6262, 6275), True, 'import pandas.util.testing as tm\n'), ((6298, 6324), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'on': '"""a"""'}), "(df1, df2, on='a')\n", (6306, 6324), True, 'import pandas as pd\n'), ((6597, 6636), 'pandas.util.testing.assert_produces_warning', 'tm.assert_produces_warning', (['UserWarning'], {}), '(UserWarning)\n', (6623, 6636), True, 'import pandas.util.testing as tm\n'), ((6878, 6909), 'pandas.compat.iteritems', 'compat.iteritems', (['newFrame[col]'], {}), '(newFrame[col])\n', (6894, 6909), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((7404, 7413), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (7409, 7413), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((7657, 7694), 'pandas.compat.iteritems', 'compat.iteritems', (['nonContigFrame[col]'], {}), '(nonContigFrame[col])\n', (7673, 7694), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((10278, 10293), 'numpy.random.rand', 'random.rand', (['(10)'], {}), '(10)\n', (10289, 10293), False, 'from numpy import random\n'), ((10365, 10378), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10374, 10378), True, 'import numpy as np\n'), ((10605, 10620), 'numpy.random.rand', 'random.rand', (['(10)'], {}), '(10)\n', (10616, 10620), False, 'from numpy import random\n'), ((10694, 10707), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10703, 10707), True, 'import numpy as np\n'), ((11904, 11919), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (11911, 11919), True, 'import numpy as np\n'), ((12621, 12643), 'numpy.random.randn', 'np.random.randn', (['(10)', '(4)'], {}), '(10, 4)\n', 
(12636, 12643), True, 'import numpy as np\n'), ((12691, 12701), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (12697, 12701), False, 'from pandas.compat import lrange, lzip, u\n'), ((12792, 12802), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (12798, 12802), False, 'from pandas.compat import lrange, lzip, u\n'), ((13479, 13489), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (13485, 13489), False, 'from pandas.compat import lrange, lzip, u\n'), ((13645, 13654), 'pandas.compat.lrange', 'lrange', (['(5)'], {}), '(5)\n', (13651, 13654), False, 'from pandas.compat import lrange, lzip, u\n'), ((13861, 13871), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (13867, 13871), False, 'from pandas.compat import lrange, lzip, u\n'), ((16897, 16906), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (16902, 16906), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((17084, 17093), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (17089, 17093), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((17311, 17320), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (17316, 17320), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((17506, 17515), 'pandas.Index', 'Index', (['[]'], {}), '([])\n', (17511, 17515), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((22430, 22458), 'numpy.arange', 'np.arange', (['(12)'], {'dtype': '"""int64"""'}), "(12, dtype='int64')\n", (22439, 22458), True, 'import numpy as np\n'), ((22499, 22526), 'numpy.arange', 'np.arange', (['(2)'], {'dtype': '"""int64"""'}), "(2, dtype='int64')\n", (22508, 22526), True, 'import numpy as np\n'), ((25846, 25888), 'pandas.util.testing.assertRaisesRegexp', 'assertRaisesRegexp', (['TypeError', '"""Must pass"""'], {}), "(TypeError, 'Must pass')\n", (25864, 25888), False, 'from 
pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((27783, 27819), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (27801, 27819), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((27954, 28009), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {'check_names': '(False)'}), '(result, expected, check_names=False)\n', (27972, 28009), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((28206, 28242), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (28224, 28242), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((28372, 28427), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {'check_names': '(False)'}), '(result, expected, check_names=False)\n', (28390, 28427), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((28941, 28977), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (28959, 28977), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((29119, 29155), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (29137, 29155), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((29358, 29394), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (29376, 29394), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((29526, 29562), 
'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (29544, 29562), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((29779, 29815), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (29797, 29815), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((29950, 29986), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (29968, 29986), False, 'from pandas.util.testing import assert_series_equal, assert_frame_equal, assertRaisesRegexp\n'), ((30050, 30078), 'numpy.ones', 'np.ones', (['(10, 2)'], {'dtype': 'bool'}), '((10, 2), dtype=bool)\n', (30057, 30078), True, 'import numpy as np\n'), ((30210, 30223), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (30219, 30223), True, 'import numpy as np\n'), ((30310, 30333), 'pandas.isnull', 'isnull', (['reindexed[0][1]'], {}), '(reindexed[0][1])\n', (30316, 30333), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((32496, 32517), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (32511, 32517), True, 'import numpy as np\n'), ((32548, 32557), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32554, 32557), False, 'from pandas.compat import lrange, lzip, u\n'), ((32559, 32568), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32565, 32568), False, 'from pandas.compat import lrange, lzip, u\n'), ((32708, 32740), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(3, 3)'], {}), '(0, 10, (3, 3))\n', (32725, 32740), True, 'import numpy as np\n'), ((32771, 32780), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32777, 32780), False, 'from pandas.compat import lrange, lzip, u\n'), ((32782, 32791), 
'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32788, 32791), False, 'from pandas.compat import lrange, lzip, u\n'), ((32931, 32963), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(3, 3)'], {}), '(0, 10, (3, 3))\n', (32948, 32963), True, 'import numpy as np\n'), ((32994, 33003), 'pandas.compat.lrange', 'lrange', (['(2)'], {}), '(2)\n', (33000, 33003), False, 'from pandas.compat import lrange, lzip, u\n'), ((33005, 33014), 'pandas.compat.lrange', 'lrange', (['(2)'], {}), '(2)\n', (33011, 33014), False, 'from pandas.compat import lrange, lzip, u\n'), ((7316, 7362), 'pandas.util.testing.equalContents', 'tm.equalContents', (['series.index', 'newFrame.index'], {}), '(series.index, newFrame.index)\n', (7332, 7362), True, 'import pandas.util.testing as tm\n'), ((8107, 8159), 'pandas.util.testing.equalContents', 'tm.equalContents', (['series.index', 'nonContigFrame.index'], {}), '(series.index, nonContigFrame.index)\n', (8123, 8159), True, 'import pandas.util.testing as tm\n'), ((10502, 10515), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10511, 10515), True, 'import numpy as np\n'), ((12969, 12978), 'pandas.compat.lrange', 'lrange', (['(5)'], {}), '(5)\n', (12975, 12978), False, 'from pandas.compat import lrange, lzip, u\n'), ((13131, 13140), 'pandas.compat.lrange', 'lrange', (['(5)'], {}), '(5)\n', (13137, 13140), False, 'from pandas.compat import lrange, lzip, u\n'), ((13291, 13300), 'pandas.compat.lrange', 'lrange', (['(5)'], {}), '(5)\n', (13297, 13300), False, 'from pandas.compat import lrange, lzip, u\n'), ((21792, 21827), 'numpy.arange', 'np.arange', (['(10 * 10)'], {'dtype': '"""float64"""'}), "(10 * 10, dtype='float64')\n", (21801, 21827), True, 'import numpy as np\n'), ((25470, 25500), 'pandas.Index', 'pd.Index', (['[1, 2]'], {'dtype': 'object'}), '([1, 2], dtype=object)\n', (25478, 25500), True, 'import pandas as pd\n'), ((30112, 30131), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(2)'], {}), '(0, 20, 2)\n', (30121, 
30131), True, 'import numpy as np\n'), ((30378, 30387), 'pandas.compat.lrange', 'lrange', (['(3)'], {}), '(3)\n', (30384, 30387), False, 'from pandas.compat import lrange, lzip, u\n'), ((32627, 32636), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32633, 32636), False, 'from pandas.compat import lrange, lzip, u\n'), ((32850, 32859), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32856, 32859), False, 'from pandas.compat import lrange, lzip, u\n'), ((33073, 33082), 'pandas.compat.lrange', 'lrange', (['(2)'], {}), '(2)\n', (33079, 33082), False, 'from pandas.compat import lrange, lzip, u\n'), ((33154, 33175), 'numpy.random.randn', 'np.random.randn', (['(5)', '(3)'], {}), '(5, 3)\n', (33169, 33175), True, 'import numpy as np\n'), ((6978, 6991), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (6986, 6991), True, 'import numpy as np\n'), ((7763, 7776), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (7771, 7776), True, 'import numpy as np\n'), ((9981, 10009), 'pandas.to_datetime', 'pd.to_datetime', (['"""2015-06-18"""'], {}), "('2015-06-18')\n", (9995, 10009), True, 'import pandas as pd\n'), ((11596, 11619), 'numpy.isnan', 'np.isnan', (["newFrame['E']"], {}), "(newFrame['E'])\n", (11604, 11619), True, 'import numpy as np\n'), ((11951, 11971), 'datetime.datetime', 'datetime', (['(2012)', '(1)', '(1)'], {}), '(2012, 1, 1)\n', (11959, 11971), False, 'from datetime import datetime\n'), ((12003, 12023), 'datetime.datetime', 'datetime', (['(2012)', '(1)', '(2)'], {}), '(2012, 1, 2)\n', (12011, 12023), False, 'from datetime import datetime\n'), ((12055, 12075), 'datetime.datetime', 'datetime', (['(2012)', '(1)', '(3)'], {}), '(2012, 1, 3)\n', (12063, 12075), False, 'from datetime import datetime\n'), ((12727, 12755), 'numpy.isnan', 'np.isnan', (['result.values[-5:]'], {}), '(result.values[-5:])\n', (12735, 12755), True, 'import numpy as np\n'), ((12848, 12858), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (12854, 12858), False, 
'from pandas.compat import lrange, lzip, u\n'), ((13544, 13554), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (13550, 13554), False, 'from pandas.compat import lrange, lzip, u\n'), ((13917, 13927), 'pandas.compat.lrange', 'lrange', (['(15)'], {}), '(15)\n', (13923, 13927), False, 'from pandas.compat import lrange, lzip, u\n'), ((26137, 26143), 'pandas.compat.u', 'u', (['"""∂"""'], {}), "('∂')\n", (26138, 26143), False, 'from pandas.compat import lrange, lzip, u\n'), ((30474, 30494), 'pandas.isnull', 'isnull', (['reindexed[1]'], {}), '(reindexed[1])\n', (30480, 30494), False, 'from pandas import compat, DataFrame, Series, Index, MultiIndex, date_range, isnull\n'), ((32600, 32609), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32606, 32609), False, 'from pandas.compat import lrange, lzip, u\n'), ((32823, 32832), 'pandas.compat.lrange', 'lrange', (['(4)'], {}), '(4)\n', (32829, 32832), False, 'from pandas.compat import lrange, lzip, u\n'), ((33046, 33055), 'pandas.compat.lrange', 'lrange', (['(2)'], {}), '(2)\n', (33052, 33055), False, 'from pandas.compat import lrange, lzip, u\n'), ((7217, 7230), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (7225, 7230), True, 'import numpy as np\n'), ((8002, 8015), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (8010, 8015), True, 'import numpy as np\n'), ((13717, 13726), 'pandas.compat.lrange', 'lrange', (['(5)'], {}), '(5)\n', (13723, 13726), False, 'from pandas.compat import lrange, lzip, u\n'), ((7033, 7063), 'numpy.isnan', 'np.isnan', (['self.frame[col][idx]'], {}), '(self.frame[col][idx])\n', (7041, 7063), True, 'import numpy as np\n'), ((7818, 7848), 'numpy.isnan', 'np.isnan', (['self.frame[col][idx]'], {}), '(self.frame[col][idx])\n', (7826, 7848), True, 'import numpy as np\n')] |
import numpy as np
import albumentations as A
import cv2
# overlay a colored (default red, in BGR) mask on top of an image
def mask_overlay(image, mask, color=(0, 0, 255), resize=(320, 320)):
    """
    Helper function to visualize mask on the top of the car

    image  : H x W x 3 image array (OpenCV-style channel order assumed -- TODO confirm)
    mask   : single-channel mask; non-zero pixels are overlaid on the image
    color  : per-channel multiplier applied to the 3-channel stacked mask
    resize : target size passed to albumentations Resize; skipped when falsy
    """
    if resize:
        resizer = A.Resize(*resize)
        image = resizer(image=image)['image']
    # Stack the mask to 3 channels and tint it with the requested color
    mask = np.dstack((mask, mask, mask)) * np.array(color)
    mask = mask.astype(np.uint8)
    # 50/50 blend of the tinted mask and the (possibly resized) image
    weighted_sum = cv2.addWeighted(mask, 0.5, image, 0.5, 0.)
    img = image.copy()
    # Replace only the pixels where the tint is active in the 3rd channel
    ind = mask[:, :, 2] > 0
    img[ind] = weighted_sum[ind]
return img | [
"numpy.dstack",
"cv2.addWeighted",
"numpy.array",
"albumentations.Resize"
] | [((442, 485), 'cv2.addWeighted', 'cv2.addWeighted', (['mask', '(0.5)', 'image', '(0.5)', '(0.0)'], {}), '(mask, 0.5, image, 0.5, 0.0)\n', (457, 485), False, 'import cv2\n'), ((269, 286), 'albumentations.Resize', 'A.Resize', (['*resize'], {}), '(*resize)\n', (277, 286), True, 'import albumentations as A\n'), ((342, 371), 'numpy.dstack', 'np.dstack', (['(mask, mask, mask)'], {}), '((mask, mask, mask))\n', (351, 371), True, 'import numpy as np\n'), ((374, 389), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (382, 389), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 07 11:51:55 2015
@author: agirard
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Embed font type in PDF: fonttype 42 (TrueType) keeps text in exported
# PDF/PS figures selectable and editable instead of rasterized outlines
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
###############################################################################
# Phase Plot Object for phase plane analysis
###############################################################################
class PhasePlot:
    """
    Continuous dynamic system phase plot.

    Samples the open-loop dynamics f(x, ubar, t) of two selected states on a
    rectangular grid bounded by the system state limits and renders the
    resulting vector field.
    ---------------------------------------------------
    x_axis : index of state to display as x axis
    y_axis : index of state to display as y axis
    """
    ############################
    def __init__(self, ContinuousDynamicSystem , x_axis = 0 , y_axis = 1):
        """Store the system, the default operating point and plot settings."""
        # System
        self.cds  = ContinuousDynamicSystem
        self.f    = self.cds.f                  # dynamic function
        self.xbar = np.copy( self.cds.xbar )    # default state
        self.ubar = np.copy( self.cds.ubar )    # default input
        self.t    = 0                           # default time

        # Grid
        self.x_axis = x_axis  # Index of state to plot on x axis
        self.y_axis = y_axis  # Index of state to plot on y axis
        self.x_axis_min = self.cds.x_lb[ self.x_axis ]
        self.x_axis_max = self.cds.x_ub[ self.x_axis ]
        self.y_axis_min = self.cds.x_lb[ self.y_axis ]
        self.y_axis_max = self.cds.x_ub[ self.y_axis ]
        self.x_axis_n = 21
        self.y_axis_n = 21

        # Plotting params
        self.color      = 'b'
        self.figsize    = (3, 2)
        self.dpi        = 300
        self.linewidth  = 0.005
        self.streamplot = False
        self.arrowstyle = '->'
        self.headlength = 4.5
        self.fontsize   = 6

    ###########################################################################
    def compute_grid(self):
        """Build the 2D sampling grid spanning the selected state limits."""
        x = np.linspace( self.x_axis_min , self.x_axis_max , self.x_axis_n )
        y = np.linspace( self.y_axis_min , self.y_axis_max , self.y_axis_n )

        self.X, self.Y = np.meshgrid( x, y)

    ###########################################################################
    def compute_vector_field(self):
        """Evaluate the open-loop dynamics at every grid node.

        Bug fix: np.meshgrid(x, y) returns arrays of shape
        (y_axis_n, x_axis_n).  The previous code indexed [i, j] with i
        ranging over x_axis_n, which is only correct for square grids;
        iterating over the actual array shape works for any grid size.
        """
        self.v = np.zeros(self.X.shape)
        self.w = np.zeros(self.Y.shape)

        n_rows, n_cols = self.X.shape
        for i in range(n_rows):
            for j in range(n_cols):
                # Actual state: default operating point with the two plotted
                # states replaced by the grid-node values
                x = np.copy( self.xbar )
                x[ self.x_axis ] = self.X[i, j]
                x[ self.y_axis ] = self.Y[i, j]

                # States derivative open loop
                dx = self.f( x , self.ubar , self.t )

                # Assign vector components
                self.v[i,j] = dx[ self.x_axis ]
                self.w[i,j] = dx[ self.y_axis ]

    ###########################################################################
    def plot_init(self):
        """Create the matplotlib figure that hosts the phase plane."""
        self.phasefig = plt.figure( figsize = self.figsize , dpi = self.dpi,
                                    frameon=True)
        self.phasefig.canvas.set_window_title('Phase plane of ' +
                                              self.cds.name )

    ###########################################################################
    def plot_vector_field(self):
        """Draw the vector field as a quiver (or stream) plot."""
        try:
            # Reuse the existing axes when the figure already has one
            self.ax = self.phasefig.axes[0]
        except IndexError:
            self.ax = self.phasefig.add_subplot(111, autoscale_on=False )

        if self.streamplot:
            self.ax.streamplot( self.X, self.Y, self.v, self.w,
                                color      = self.color,
                                linewidth  = self.linewidth,
                                arrowstyle = self.arrowstyle,
                                arrowsize  = self.headlength )
        else:
            self.ax.quiver( self.X, self.Y, self.v, self.w,
                            color     = self.color,
                            linewidth = self.linewidth)
            #, headlength = self.headlength )

    ###########################################################################
    def plot_finish(self):
        """Label the axes, set the limits and tighten the layout."""
        self.ax.set_xlabel(self.cds.state_label[ self.x_axis ] + ' ' +
                           self.cds.state_units[ self.x_axis ] ,
                           fontsize = self.fontsize)
        self.ax.set_ylabel(self.cds.state_label[ self.y_axis ] + ' ' +
                           self.cds.state_units[ self.y_axis ] ,
                           fontsize = self.fontsize)
        self.ax.set_xlim([ self.x_axis_min , self.x_axis_max ])
        self.ax.set_ylim([ self.y_axis_min , self.y_axis_max ])
        self.ax.grid(True)
        self.phasefig.tight_layout()

    ###########################################################################
    def plot(self):
        """ Plot phase plane: compute grid and field, then render """
        self.compute_grid()
        self.plot_init()
        self.compute_vector_field()
        self.plot_vector_field()
        self.plot_finish()
        self.phasefig.show()
###############################################################################
# 3D Phase Plot Object for phase plane analysis
###############################################################################
class PhasePlot3( PhasePlot ):
    """
    Continuous dynamic system phase plot in 3D.
    ---------------------------------------------------
    x_axis : index of state to display as x axis
    y_axis : index of state to display as y axis
    z_axis : index of state to display as z axis
    """
    ###########################################################################
    def __init__(self, ContinuousDynamicSystem, x_axis=0, y_axis=1, z_axis=2):
        """Reuse the 2D setup, then add the z-axis grid and 3D styling."""
        PhasePlot.__init__(self, ContinuousDynamicSystem, x_axis, y_axis)

        # Smaller resolution (a dense 3D quiver plot is unreadable)
        self.x_axis_n = 5
        self.y_axis_n = 5
        self.z_axis_n = 5

        # Z axis
        self.z_axis     = z_axis
        self.z_axis_min = self.cds.x_lb[ self.z_axis ]
        self.z_axis_max = self.cds.x_ub[ self.z_axis ]

        # Plotting params
        self.color      = 'r'
        self.dpi        = 200
        self.linewidth  = 0.5
        self.length     = 0.2
        self.arrowstyle = '->'
        self.fontsize   = 6

    ###########################################################################
    def compute_grid(self):
        """Build the 3D sampling grid spanning the selected state limits."""
        x = np.linspace( self.x_axis_min , self.x_axis_max , self.x_axis_n )
        y = np.linspace( self.y_axis_min , self.y_axis_max , self.y_axis_n )
        z = np.linspace( self.z_axis_min , self.z_axis_max , self.z_axis_n )

        self.X, self.Y, self.Z = np.meshgrid( x, y, z)

    ###########################################################################
    def compute_vector_field(self):
        """Evaluate the open-loop dynamics at every 3D grid node.

        Bug fix: np.meshgrid(x, y, z) returns arrays of shape
        (y_axis_n, x_axis_n, z_axis_n).  The previous triple loop indexed
        [i, j, k] with i over x_axis_n, which is only correct when all three
        resolutions are equal; np.ndindex iterates the true array shape.
        """
        self.v = np.zeros(self.X.shape)
        self.w = np.zeros(self.Y.shape)
        self.u = np.zeros(self.Z.shape)

        for idx in np.ndindex(self.X.shape):
            # Actual state: default operating point with the three plotted
            # states replaced by the grid-node values
            x = np.copy( self.xbar )
            x[ self.x_axis ] = self.X[idx]
            x[ self.y_axis ] = self.Y[idx]
            x[ self.z_axis ] = self.Z[idx]

            # States derivative open loop
            dx = self.f( x , self.ubar , self.t )

            # Assign vector components
            self.v[idx] = dx[ self.x_axis ]
            self.w[idx] = dx[ self.y_axis ]
            self.u[idx] = dx[ self.z_axis ]

    ###########################################################################
    def plot_vector_field(self):
        """Draw the 3D vector field as a quiver plot."""
        try:
            # Reuse the existing axes when the figure already has one
            self.ax = self.phasefig.axes[0]
        except IndexError:
            self.ax = self.phasefig.add_subplot(111, projection='3d')

        self.ax.quiver( self.X, self.Y, self.Z, self.v, self.w, self.u,
                        color=self.color, linewidth = self.linewidth,
                        length = self.length)
        #, headlength = self.headlength, normalize = True )

    ###########################################################################
    def plot_finish(self):
        """Label the three axes and tighten the layout."""
        self.ax.set_xlabel(self.cds.state_label[ self.x_axis ] + ' ' +
                           self.cds.state_units[ self.x_axis ] , fontsize = self.fontsize)
        self.ax.set_ylabel(self.cds.state_label[ self.y_axis ] + ' ' +
                           self.cds.state_units[ self.y_axis ] , fontsize = self.fontsize)
        self.ax.set_zlabel(self.cds.state_label[ self.z_axis ] + ' ' +
                           self.cds.state_units[ self.z_axis ] , fontsize = self.fontsize)
        plt.grid(True)
        plt.tight_layout()
| [
"numpy.copy",
"matplotlib.pyplot.grid",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"numpy.meshgrid"
] | [((976, 998), 'numpy.copy', 'np.copy', (['self.cds.xbar'], {}), '(self.cds.xbar)\n', (983, 998), True, 'import numpy as np\n'), ((1038, 1060), 'numpy.copy', 'np.copy', (['self.cds.ubar'], {}), '(self.cds.ubar)\n', (1045, 1060), True, 'import numpy as np\n'), ((2020, 2080), 'numpy.linspace', 'np.linspace', (['self.x_axis_min', 'self.x_axis_max', 'self.x_axis_n'], {}), '(self.x_axis_min, self.x_axis_max, self.x_axis_n)\n', (2031, 2080), True, 'import numpy as np\n'), ((2097, 2157), 'numpy.linspace', 'np.linspace', (['self.y_axis_min', 'self.y_axis_max', 'self.y_axis_n'], {}), '(self.y_axis_min, self.y_axis_max, self.y_axis_n)\n', (2108, 2157), True, 'import numpy as np\n'), ((2196, 2213), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2207, 2213), True, 'import numpy as np\n'), ((2379, 2401), 'numpy.zeros', 'np.zeros', (['self.X.shape'], {}), '(self.X.shape)\n', (2387, 2401), True, 'import numpy as np\n'), ((2419, 2441), 'numpy.zeros', 'np.zeros', (['self.Y.shape'], {}), '(self.Y.shape)\n', (2427, 2441), True, 'import numpy as np\n'), ((3215, 3275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'self.figsize', 'dpi': 'self.dpi', 'frameon': '(True)'}), '(figsize=self.figsize, dpi=self.dpi, frameon=True)\n', (3225, 3275), True, 'import matplotlib.pyplot as plt\n'), ((6860, 6920), 'numpy.linspace', 'np.linspace', (['self.x_axis_min', 'self.x_axis_max', 'self.x_axis_n'], {}), '(self.x_axis_min, self.x_axis_max, self.x_axis_n)\n', (6871, 6920), True, 'import numpy as np\n'), ((6937, 6997), 'numpy.linspace', 'np.linspace', (['self.y_axis_min', 'self.y_axis_max', 'self.y_axis_n'], {}), '(self.y_axis_min, self.y_axis_max, self.y_axis_n)\n', (6948, 6997), True, 'import numpy as np\n'), ((7014, 7074), 'numpy.linspace', 'np.linspace', (['self.z_axis_min', 'self.z_axis_max', 'self.z_axis_n'], {}), '(self.z_axis_min, self.z_axis_max, self.z_axis_n)\n', (7025, 7074), True, 'import numpy as np\n'), ((7121, 7141), 'numpy.meshgrid', 'np.meshgrid', 
(['x', 'y', 'z'], {}), '(x, y, z)\n', (7132, 7141), True, 'import numpy as np\n'), ((7307, 7329), 'numpy.zeros', 'np.zeros', (['self.X.shape'], {}), '(self.X.shape)\n', (7315, 7329), True, 'import numpy as np\n'), ((7347, 7369), 'numpy.zeros', 'np.zeros', (['self.Y.shape'], {}), '(self.Y.shape)\n', (7355, 7369), True, 'import numpy as np\n'), ((7387, 7409), 'numpy.zeros', 'np.zeros', (['self.Z.shape'], {}), '(self.Z.shape)\n', (7395, 7409), True, 'import numpy as np\n'), ((9362, 9376), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (9370, 9376), True, 'import matplotlib.pyplot as plt\n'), ((9385, 9403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9401, 9403), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2621), 'numpy.copy', 'np.copy', (['self.xbar'], {}), '(self.xbar)\n', (2610, 2621), True, 'import numpy as np\n'), ((7626, 7644), 'numpy.copy', 'np.copy', (['self.xbar'], {}), '(self.xbar)\n', (7633, 7644), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# esice_goffgratch.m
import numpy as num
def esice_goffgratch(T):
    """svp = esice_goffgratch(T)

    Water vapor saturation pressure over ice, Goff-Gratch formulation
    (sixth revised edition of the Smithsonian Meteorology Tables).
    Adopted from PvD's svp_ice.pro.

    Inputs:
        T    temperature [Kelvin]
    Output:
        svp  saturation pressure [mbar]

    A value is returned for any input T, but the formulation is not
    valid for T >= 370 K or T <= 160 K.
    """
    # Goff-Gratch coefficients
    ewi = 6.1071    # reference pressure [mbar]
    c1  = 9.09718
    c2  = 3.56654
    c3  = 0.876793

    ratio = 273.15 / T
    exponent = (-c1 * (ratio - 1.0)
                - c2 * num.log10(ratio)
                + c3 * (1.0 - 1.0 / ratio)
                + num.log10(ewi))
    return 10.0 ** exponent
if __name__ == '__main__':
    # Demo: print the docstring, then evaluate the formula on a sample
    # temperature profile given in degrees Celsius
    print(esice_goffgratch.__doc__)
    t = num.array(
      ( 24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.10, 14.69, 13.22, 11.52,
        9.53, 7.24, 4.80, 2.34, 0.04, -2.29, -4.84, -7.64,-10.66,-13.95,
        -17.54,-21.45,-25.58,-29.90,-34.33,-38.94,-43.78,-48.80,-53.94,-58.79,
        -63.27,-67.32,-70.74,-73.62,-75.74,-77.07,-77.43,-76.63,-75.06,-73.14,
        -71.43 ))
    # Convert Celsius -> Kelvin before calling
    t = t + 273.15
    e = esice_goffgratch(t)
    print(e)
| [
"numpy.array",
"numpy.log10"
] | [((888, 1219), 'numpy.array', 'num.array', (['(24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.1, 14.69, 13.22, 11.52, 9.53,\n 7.24, 4.8, 2.34, 0.04, -2.29, -4.84, -7.64, -10.66, -13.95, -17.54, -\n 21.45, -25.58, -29.9, -34.33, -38.94, -43.78, -48.8, -53.94, -58.79, -\n 63.27, -67.32, -70.74, -73.62, -75.74, -77.07, -77.43, -76.63, -75.06, \n -73.14, -71.43)'], {}), '((24.54, 23.16, 21.67, 20.23, 18.86, 17.49, 16.1, 14.69, 13.22, \n 11.52, 9.53, 7.24, 4.8, 2.34, 0.04, -2.29, -4.84, -7.64, -10.66, -13.95,\n -17.54, -21.45, -25.58, -29.9, -34.33, -38.94, -43.78, -48.8, -53.94, -\n 58.79, -63.27, -67.32, -70.74, -73.62, -75.74, -77.07, -77.43, -76.63, \n -75.06, -73.14, -71.43))\n', (897, 1219), True, 'import numpy as num\n'), ((770, 784), 'numpy.log10', 'num.log10', (['ewi'], {}), '(ewi)\n', (779, 784), True, 'import numpy as num\n'), ((691, 707), 'numpy.log10', 'num.log10', (['ratio'], {}), '(ratio)\n', (700, 707), True, 'import numpy as num\n')] |
import sys
import os
import numpy as np
import cv2
import torch
from model import *
from scipy.ndimage.filters import gaussian_filter
from loss import kldiv, cc, nss
import argparse
from torch.utils.data import DataLoader
from dataloader import DHF1KDataset
from utils import *
import time
from tqdm import tqdm
from torchvision import transforms, utils
from os.path import join
import torchaudio
from signal_utils import *
import json
# Select the compute device once at import time: GPU if available, else CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
def make_dataset(root, video_names, audio_path, video_fps):
    """Load each video's audio track and precompute, per frame, the
    [start, end] sample window centred on that frame's timestamp.

    Returns a dict: video name -> {audiopath, video_id, Fs, wav, starts, ends}.
    Videos with <= 1 frame or without a matching .wav file are skipped.
    """
    dataset = []  # NOTE(review): never filled nor returned -- appears unused
    audiodata= {}

    for i in range(len(video_names)):
        if i % 100 == 0:
            print('dataset loading [{}/{}]'.format(i, len(video_names)))
        # Frame count taken from the extracted frame images on disk
        n_frames = len(os.listdir(join(root, 'video_frames', video_names[i])))
        if n_frames <= 1:
            print("Less frames")
            continue
        begin_t = 1
        end_t = n_frames
        audio_wav_path = os.path.join(audio_path,video_names[i]+'.wav')
        if not os.path.exists(audio_wav_path):
            print("Not exists", audio_wav_path)
            continue
        [audiowav,Fs] = torchaudio.load(audio_wav_path, normalization=False)
        downsample_rate=22050
        downsample_resample = torchaudio.transforms.Resample(
            Fs, downsample_rate, resampling_method='sinc_interpolation')
        audiowav = downsample_resample(audiowav)
        # Presumably scales raw fixed-point samples to floats -- TODO confirm
        audiowav = audiowav * (2 ** -23)
        # Samples per video frame at the ORIGINAL rate Fs.
        # NOTE(review): starts/ends below are computed in original-Fs sample
        # indices but index into the RESAMPLED (22050 Hz) waveform -- verify
        # this is intentional.
        n_samples = Fs/float(video_fps[video_names[i]])
        starts=np.zeros(n_frames+1, dtype=int)
        ends=np.zeros(n_frames+1, dtype=int)
        starts[0]=0
        ends[0]=0
        for videoframe in range(1,n_frames+1):
            # Window of n_samples centred on the frame time, clamped to the track
            startemp=max(0,((videoframe-1)*(1.0/float(video_fps[video_names[i]]))*Fs)-n_samples/2)
            starts[videoframe] = int(startemp)
            endtemp=min(audiowav.shape[1],abs(((videoframe-1)*(1.0/float(video_fps[video_names[i]]))*Fs)+n_samples/2))
            ends[videoframe] = int(endtemp)
        audioinfo = {
            'audiopath': audio_path,
            'video_id': video_names[i],
            'Fs' : Fs,
            'wav' : audiowav,
            'starts': starts,
            'ends' : ends
        }
        audiodata[video_names[i]] = audioinfo
    return audiodata
def get_audio_feature(audioind, audiodata, args, start_idx):
    """Extract a Hann-windowed audio excerpt for one clip and centre it in a
    fixed-size zero buffer.

    audioind  : video name (key into audiodata); unknown keys yield silence
    audiodata : dict built by make_dataset (wav tensor + per-frame windows)
    args      : needs args.clip_size (number of frames per clip)
    start_idx : index of the clip's first frame

    Returns a (1, 1, max_audio_win, 1) float tensor; all zeros when the
    video has no audio entry.
    """
    len_snippet = args.clip_size
    max_audio_Fs = 22050
    min_video_fps = 10
    # Worst-case excerpt length: 32 frames at the slowest fps / highest rate
    max_audio_win = int(max_audio_Fs / min_video_fps * 32)

    audioexcer = torch.zeros(1, max_audio_win)
    valid = {}
    valid['audio'] = 0

    if audioind in audiodata:
        excerptstart = audiodata[audioind]['starts'][start_idx+1]
        if start_idx+len_snippet >= len(audiodata[audioind]['ends']):
            # Clip extends past the end of the audio track: clamp to the
            # last available window
            print("Exceeds size", audioind)
            sys.stdout.flush()
            excerptend = audiodata[audioind]['ends'][-1]
        else:
            excerptend = audiodata[audioind]['ends'][start_idx+len_snippet]
        try:
            valid['audio'] = audiodata[audioind]['wav'][:, excerptstart:excerptend+1].shape[1]
        except Exception:
            # Fix: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort fallback (valid stays 0)
            # but only for ordinary exceptions.
            pass
        audioexcer_tmp = audiodata[audioind]['wav'][:, excerptstart:excerptend+1]
        # Hann-window the excerpt and centre it in the zero buffer
        # (odd lengths need one extra sample on the right side)
        if (valid['audio'] % 2) == 0:
            audioexcer[:,((audioexcer.shape[1]//2)-(valid['audio']//2)):((audioexcer.shape[1]//2)+(valid['audio']//2))] = \
                torch.from_numpy(np.hanning(audioexcer_tmp.shape[1])).float() * audioexcer_tmp
        else:
            audioexcer[:,((audioexcer.shape[1]//2)-(valid['audio']//2)):((audioexcer.shape[1]//2)+(valid['audio']//2)+1)] = \
                torch.from_numpy(np.hanning(audioexcer_tmp.shape[1])).float() * audioexcer_tmp

    audio_feature = audioexcer.view(1, 1, -1, 1)
    return audio_feature
def validate(args):
    """Run saliency inference over every video listed in <path_indata>/fps.json
    and save one saliency map per frame under args.save_path/<video>/.

    A sliding window of args.clip_size frames predicts the map of its LAST
    frame; the earliest windows are additionally run time-reversed so the
    first clip_size-1 frames also get predictions.
    """
    path_indata = args.path_indata
    file_weight = args.file_weight

    len_temporal = args.clip_size

    # Audio-visual or visual-only model, matching the training configuration
    if args.use_sound:
        model = VideoAudioSaliencyModel(
            transformer_in_channel=args.transformer_in_channel,
            use_transformer=False,
            nhead=args.nhead,
            use_upsample=bool(args.decoder_upsample),
            num_hier=args.num_hier,
            num_clips=args.clip_size
        )
    else:
        model = VideoSaliencyModel(
            transformer_in_channel=args.transformer_in_channel,
            nhead=args.nhead,
            use_upsample=bool(args.decoder_upsample),
            num_hier=args.num_hier,
            num_clips=args.clip_size
        )
    model.load_state_dict(torch.load(file_weight))
    model = model.to(device)
    torch.backends.cudnn.benchmark = False
    model.eval()

    # Video names come from the keys of the fps.json mapping
    list_indata = []
    with open(join(args.path_indata, 'fps.json'), 'r') as f:
        video_fps = json.load(f)
    for i in video_fps:
        list_indata.append(i)

    list_indata.sort()

    if args.use_sound:
        audiodata = make_dataset(
            args.path_indata,
            list_indata,
            join(args.path_indata, 'video_audio'),
            video_fps
        )
    # Optional sharding: process only the start_idx-th of num_parts slices
    if args.start_idx!=-1:
        _len = (1.0/float(args.num_parts))*len(list_indata)
        list_indata = list_indata[int((args.start_idx-1)*_len): int(args.start_idx*_len)]

    for dname in list_indata:
        print ('processing ' + dname, flush=True)
        list_frames = [f for f in os.listdir(os.path.join(path_indata, 'video_frames', dname)) if os.path.isfile(os.path.join(path_indata, 'video_frames', dname, f))]
        list_frames.sort()
        os.makedirs(join(args.save_path, dname), exist_ok=True)

        # Need enough frames for a forward window plus the mirrored warm-up
        if len(list_frames) >= 2*len_temporal-1:
            snippet = []
            for i in range(len(list_frames)):
                torch_img, img_size = torch_transform(os.path.join(path_indata, 'video_frames', dname, list_frames[i]))
                snippet.append(torch_img)

                if i >= len_temporal-1:
                    # (1, C, T, H, W) clip expected by the model
                    clip = torch.FloatTensor(torch.stack(snippet, dim=0)).unsqueeze(0)
                    clip = clip.permute((0,2,1,3,4))
                    audio_feature = None
                    if args.use_sound:
                        audio_feature = get_audio_feature(dname, audiodata, args, i-len_temporal+1)
                    process(model, clip, path_indata, dname, list_frames[i], args, img_size, audio_feature=audio_feature)
                    # Warm-up: run the reversed clip to predict the early frames
                    if i < 2*len_temporal-2:
                        if args.use_sound:
                            audio_feature = torch.flip(audio_feature, [2])
                        process(model, torch.flip(clip, [2]), path_indata, dname, list_frames[i-len_temporal+1], args, img_size, audio_feature=audio_feature)

                    del snippet[0]
        else:
            print (' more frames are needed')
def torch_transform(path):
    """Load an image, resize to 224x384 and normalize with the standard
    ImageNet mean/std.

    Returns (tensor, original_size); original_size is the PIL (width, height).
    NOTE(review): `Image` is not among this file's visible imports --
    presumably re-exported by `from utils import *`; verify.
    """
    img_transform = transforms.Compose([
        transforms.Resize((224, 384)),
        transforms.ToTensor(),
        transforms.Normalize(
            [0.485, 0.456, 0.406],
            [0.229, 0.224, 0.225]
        )
    ])
    img = Image.open(path).convert('RGB')
    sz = img.size
    img = img_transform(img)
    return img, sz
def blur(img):
    """Smooth a saliency map with an 11x11 Gaussian kernel and return the
    result as a float tensor."""
    kernel = (11, 11)
    smoothed = cv2.GaussianBlur(img, kernel, 0)
    return torch.FloatTensor(smoothed)
def process(model, clip, path_inpdata, dname, frame_no, args, img_size, audio_feature=None):
    """Run the saliency model on one clip and save the blurred, resized map
    to args.save_path/<dname>/<frame_no>."""
    with torch.no_grad():
        # Fix: use an identity check, not `== None` -- torch tensors overload
        # __eq__, so equality comparison against None is a known pitfall.
        if audio_feature is None:
            smap = model(clip.to(device)).cpu().data[0]
        else:
            smap = model(clip.to(device), audio_feature.to(device)).cpu().data[0]

    smap = smap.numpy()
    # Restore the original frame resolution before smoothing and saving
    smap = cv2.resize(smap, (img_size[0], img_size[1]))
    smap = blur(smap)
    img_save(smap, join(args.save_path, dname, frame_no), normalize=True)
if __name__ == '__main__':
    # CLI entry point: parse the evaluation configuration and run inference
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_weight',default="./saved_models/no_trans_upsampling_reduced.pt", type=str)
    parser.add_argument('--nhead',default=4, type=int)
    parser.add_argument('--num_encoder_layers',default=3, type=int)
    parser.add_argument('--transformer_in_channel',default=512, type=int)
    parser.add_argument('--save_path',default='/ssd_scratch/cvit/samyak/Results/diem_test', type=str)
    # Sharding controls: process slice start_idx (1-based) of num_parts
    parser.add_argument('--start_idx',default=-1, type=int)
    parser.add_argument('--num_parts',default=4, type=int)
    parser.add_argument('--split',default=1, type=int)
    parser.add_argument('--path_indata',default='/ssd_scratch/cvit/samyak/data/', type=str)
    parser.add_argument('--dataset',default='DIEM', type=str)
    parser.add_argument('--multi_frame',default=0, type=int)
    parser.add_argument('--decoder_upsample',default=1, type=int)
    parser.add_argument('--num_decoder_layers',default=-1, type=int)
    parser.add_argument('--num_hier',default=3, type=int)
    parser.add_argument('--clip_size',default=32, type=int)
    parser.add_argument('--use_sound',default=False, type=bool)
    args = parser.parse_args()
    print(args)
    validate(args)
| [
"numpy.hanning",
"torchaudio.load",
"torch.cuda.is_available",
"torch.flip",
"os.path.exists",
"argparse.ArgumentParser",
"sys.stdout.flush",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"cv2.resize",
"cv2.GaussianBlur",
"torch.load"... | [((2242, 2271), 'torch.zeros', 'torch.zeros', (['(1)', 'max_audio_win'], {}), '(1, max_audio_win)\n', (2253, 2271), False, 'import torch\n'), ((6140, 6182), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(k_size, k_size)', '(0)'], {}), '(img, (k_size, k_size), 0)\n', (6156, 6182), False, 'import cv2\n'), ((6188, 6209), 'torch.FloatTensor', 'torch.FloatTensor', (['bl'], {}), '(bl)\n', (6205, 6209), False, 'import torch\n'), ((6512, 6556), 'cv2.resize', 'cv2.resize', (['smap', '(img_size[0], img_size[1])'], {}), '(smap, (img_size[0], img_size[1]))\n', (6522, 6556), False, 'import cv2\n'), ((6686, 6711), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6709, 6711), False, 'import argparse\n'), ((470, 495), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (493, 495), False, 'import torch\n'), ((915, 964), 'os.path.join', 'os.path.join', (['audio_path', "(video_names[i] + '.wav')"], {}), "(audio_path, video_names[i] + '.wav')\n", (927, 964), False, 'import os\n'), ((1072, 1124), 'torchaudio.load', 'torchaudio.load', (['audio_wav_path'], {'normalization': '(False)'}), '(audio_wav_path, normalization=False)\n', (1087, 1124), False, 'import torchaudio\n'), ((1173, 1269), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', (['Fs', 'downsample_rate'], {'resampling_method': '"""sinc_interpolation"""'}), "(Fs, downsample_rate, resampling_method=\n 'sinc_interpolation')\n", (1203, 1269), False, 'import torchaudio\n'), ((1412, 1445), 'numpy.zeros', 'np.zeros', (['(n_frames + 1)'], {'dtype': 'int'}), '(n_frames + 1, dtype=int)\n', (1420, 1445), True, 'import numpy as np\n'), ((1451, 1484), 'numpy.zeros', 'np.zeros', (['(n_frames + 1)'], {'dtype': 'int'}), '(n_frames + 1, dtype=int)\n', (1459, 1484), True, 'import numpy as np\n'), ((3952, 3975), 'torch.load', 'torch.load', (['file_weight'], {}), '(file_weight)\n', (3962, 3975), False, 'import torch\n'), ((4152, 4164), 'json.load', 'json.load', 
(['f'], {}), '(f)\n', (4161, 4164), False, 'import json\n'), ((6310, 6325), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6323, 6325), False, 'import torch\n'), ((6593, 6630), 'os.path.join', 'join', (['args.save_path', 'dname', 'frame_no'], {}), '(args.save_path, dname, frame_no)\n', (6597, 6630), False, 'from os.path import join\n'), ((971, 1001), 'os.path.exists', 'os.path.exists', (['audio_wav_path'], {}), '(audio_wav_path)\n', (985, 1001), False, 'import os\n'), ((2492, 2510), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2508, 2510), False, 'import sys\n'), ((4091, 4125), 'os.path.join', 'join', (['args.path_indata', '"""fps.json"""'], {}), "(args.path_indata, 'fps.json')\n", (4095, 4125), False, 'from os.path import join\n'), ((4321, 4358), 'os.path.join', 'join', (['args.path_indata', '"""video_audio"""'], {}), "(args.path_indata, 'video_audio')\n", (4325, 4358), False, 'from os.path import join\n'), ((4816, 4843), 'os.path.join', 'join', (['args.save_path', 'dname'], {}), '(args.save_path, dname)\n', (4820, 4843), False, 'from os.path import join\n'), ((5865, 5894), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 384)'], {}), '((224, 384))\n', (5882, 5894), False, 'from torchvision import transforms, utils\n'), ((5899, 5920), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5918, 5920), False, 'from torchvision import transforms, utils\n'), ((5925, 5991), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (5945, 5991), False, 'from torchvision import transforms, utils\n'), ((760, 802), 'os.path.join', 'join', (['root', '"""video_frames"""', 'video_names[i]'], {}), "(root, 'video_frames', video_names[i])\n", (764, 802), False, 'from os.path import join\n'), ((4651, 4699), 'os.path.join', 'os.path.join', (['path_indata', '"""video_frames"""', 'dname'], {}), "(path_indata, 
'video_frames', dname)\n", (4663, 4699), False, 'import os\n'), ((4719, 4770), 'os.path.join', 'os.path.join', (['path_indata', '"""video_frames"""', 'dname', 'f'], {}), "(path_indata, 'video_frames', dname, f)\n", (4731, 4770), False, 'import os\n'), ((5000, 5064), 'os.path.join', 'os.path.join', (['path_indata', '"""video_frames"""', 'dname', 'list_frames[i]'], {}), "(path_indata, 'video_frames', dname, list_frames[i])\n", (5012, 5064), False, 'import os\n'), ((2986, 3021), 'numpy.hanning', 'np.hanning', (['audioexcer_tmp.shape[1]'], {}), '(audioexcer_tmp.shape[1])\n', (2996, 3021), True, 'import numpy as np\n'), ((3194, 3229), 'numpy.hanning', 'np.hanning', (['audioexcer_tmp.shape[1]'], {}), '(audioexcer_tmp.shape[1])\n', (3204, 3229), True, 'import numpy as np\n'), ((5559, 5589), 'torch.flip', 'torch.flip', (['audio_feature', '[2]'], {}), '(audio_feature, [2])\n', (5569, 5589), False, 'import torch\n'), ((5611, 5632), 'torch.flip', 'torch.flip', (['clip', '[2]'], {}), '(clip, [2])\n', (5621, 5632), False, 'import torch\n'), ((5160, 5187), 'torch.stack', 'torch.stack', (['snippet'], {'dim': '(0)'}), '(snippet, dim=0)\n', (5171, 5187), False, 'import torch\n')] |
import PyKinectV2
from PyKinectV2 import *
import ctypes
import _ctypes
from _ctypes import COMError
import comtypes
import sys
import numpy
import time, pdb
import importlib
if sys.hexversion >= 0x03000000:
import _thread as thread
else:
import thread
KINECT_MAX_BODY_COUNT = 6
class PyKinectRuntime(object):
"""manages Kinect objects and simplifying access to them"""
    def __init__(self, frame_source_types):
        """Open the default Kinect sensor and set up the frame sources
        requested by the `frame_source_types` bitmask, then start the
        background frame-pump thread.
        """
        # recipe to get address of surface: http://archives.seul.org/pygame/users/Apr-2008/msg00218.html
        # Py_ssize_t width depends on the interpreter's pointer size
        is_64bits = sys.maxsize > 2**32
        if not is_64bits:
            self.Py_ssize_t = ctypes.c_int
        else:
            self.Py_ssize_t = ctypes.c_int64

        # NOTE(review): PyObject_AsWriteBuffer is a legacy CPython C-API
        # removed from recent Python releases -- verify the targeted version.
        self._PyObject_AsWriteBuffer = ctypes.pythonapi.PyObject_AsWriteBuffer
        self._PyObject_AsWriteBuffer.restype = ctypes.c_int
        self._PyObject_AsWriteBuffer.argtypes = [ctypes.py_object,
                                  ctypes.POINTER(ctypes.c_void_p),
                                  ctypes.POINTER(self.Py_ssize_t)]

        #self._color_frame_ready = PyKinectV2._event()
        #self._depth_frame_ready = PyKinectV2._event()
        #self._body_frame_ready = PyKinectV2._event()
        #self._body_index_frame_ready = PyKinectV2._event()
        #self._infrared_frame_ready = PyKinectV2._event()
        #self._long_exposure_infrared_frame_ready = PyKinectV2._event()
        #self._audio_frame_ready = PyKinectV2._event()

        # Win32 event used to signal the frame thread to shut down
        self._close_event = ctypes.windll.kernel32.CreateEventW(None, False, False, None)

        self._color_frame_arrived_event = 0
        self._depth_frame_arrived_event = 0
        self._body_frame_arrived_event = 0
        self._body_index_frame_arrived_event = 0
        self._infrared_frame_arrived_event = 0
        self._long_exposure_infrared_frame_arrived_event = 0
        self._audio_frame_arrived_event = 0

        # One lock per stream: the frame thread writes, get_last_* reads
        self._color_frame_lock = thread.allocate()
        self._depth_frame_lock = thread.allocate()
        self._body_frame_lock = thread.allocate()
        self._body_index_frame_lock = thread.allocate()
        self._infrared_frame_lock = thread.allocate()
        self._long_exposure_infrared_frame_lock = thread.allocate()
        self._audio_frame_lock = thread.allocate()

        #initialize sensor
        self._sensor = ctypes.POINTER(PyKinectV2.IKinectSensor)()
        hres = ctypes.windll.kinect20.GetDefaultKinectSensor(ctypes.byref(self._sensor))
        hres = self._sensor.Open()

        self._mapper = self._sensor.CoordinateMapper

        self.frame_source_types = frame_source_types
        self.max_body_count = KINECT_MAX_BODY_COUNT

        # Wait-handle array: slot 0 is the close event, the rest are filled
        # below with the per-stream frame-arrived events actually subscribed
        self._handles = (ctypes.c_voidp * 8)()
        self._handles[0] = self._close_event
        self._handles[1] = self._close_event
        self._handles[2] = self._close_event
        self._handles[3] = self._close_event
        self._handles[4] = self._close_event
        self._handles[5] = self._close_event
        self._handles[6] = self._close_event
        self._handles[7] = self._close_event

        self._waitHandleCount = 1

        self._color_source = self._sensor.ColorFrameSource
        self.color_frame_desc = self._color_source.FrameDescription
        self._infrared_source = self._sensor.InfraredFrameSource
        self.infrared_frame_desc = self._infrared_source.FrameDescription
        self._depth_source = self._sensor.DepthFrameSource
        self.depth_frame_desc = self._depth_source.FrameDescription
        self._body_index_source = self._sensor.BodyIndexFrameSource
        self.body_index_frame_desc = self._body_index_source.FrameDescription
        self._body_source = self._sensor.BodyFrameSource
        self._body_frame_data = ctypes.POINTER(ctypes.POINTER(IBody))
        self.max_body_count = self._body_source.BodyCount

        self._color_frame_data = None
        self._depth_frame_data = None
        self._body_frame_data = None
        self._body_index_frame_data = None
        self._infrared_frame_data = None
        self._long_exposure_infrared_frame_data = None
        self._audio_frame_data = None

        # For each requested source: allocate the frame buffer, open a
        # reader and register its frame-arrived event in the handle array
        if(self.frame_source_types & FrameSourceTypes_Color):
            #pdb.set_trace()
            self._color_frame_data = ctypes.POINTER(ctypes.c_ubyte)
            self._color_frame_data_capacity = ctypes.c_uint(self.color_frame_desc.Width * self.color_frame_desc.Height * 4)
            self._color_frame_data_type = ctypes.c_ubyte * self._color_frame_data_capacity.value
            self._color_frame_data = ctypes.cast(self._color_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte))
            self._color_frame_reader = self._color_source.OpenReader()
            self._color_frame_arrived_event = self._color_frame_reader.SubscribeFrameArrived()
            self._handles[self._waitHandleCount] = self._color_frame_arrived_event
            self._waitHandleCount += 1

        if(self.frame_source_types & FrameSourceTypes_Infrared):
            self._infrared_frame_data = ctypes.POINTER(ctypes.c_ushort)
            self._infrared_frame_data_capacity = ctypes.c_uint(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height)
            self._infrared_frame_data_type = ctypes.c_ushort * self._infrared_frame_data_capacity.value
            self._infrared_frame_data = ctypes.cast(self._infrared_frame_data_type(), ctypes.POINTER(ctypes.c_ushort))
            self._infrared_frame_reader = self._infrared_source.OpenReader()
            self._infrared_frame_arrived_event = self._infrared_frame_reader.SubscribeFrameArrived()
            self._handles[self._waitHandleCount] = self._infrared_frame_arrived_event
            self._waitHandleCount += 1

        if(self.frame_source_types & FrameSourceTypes_Depth):
            self._depth_frame_data = ctypes.POINTER(ctypes.c_ushort)
            self._depth_frame_data_capacity = ctypes.c_uint(self.depth_frame_desc.Width * self.depth_frame_desc.Height)
            self._depth_frame_data_type = ctypes.c_ushort * self._depth_frame_data_capacity.value
            self._depth_frame_data = ctypes.cast(self._depth_frame_data_type(), ctypes.POINTER(ctypes.c_ushort))
            self._depth_frame_reader = self._depth_source.OpenReader()
            self._depth_frame_arrived_event = self._depth_frame_reader.SubscribeFrameArrived()
            self._handles[self._waitHandleCount] = self._depth_frame_arrived_event
            self._waitHandleCount += 1

        if(self.frame_source_types & FrameSourceTypes_BodyIndex):
            self._body_index_frame_data = ctypes.POINTER(ctypes.c_ubyte)
            self._body_index_frame_data_capacity = ctypes.c_uint(self.body_index_frame_desc.Width * self.body_index_frame_desc.Height)
            self._body_index_frame_data_type = ctypes.c_ubyte * self._body_index_frame_data_capacity.value
            self._body_index_frame_data = ctypes.cast(self._body_index_frame_data_type(), ctypes.POINTER(ctypes.c_ubyte))
            self._body_index_frame_reader = self._body_index_source.OpenReader()
            self._body_index_frame_arrived_event = self._body_index_frame_reader.SubscribeFrameArrived()
            self._handles[self._waitHandleCount] = self._body_index_frame_arrived_event
            self._waitHandleCount += 1

        self._body_frame_data = None
        if(self.frame_source_types & FrameSourceTypes_Body):
            self._body_frame_data_capacity = ctypes.c_uint(self.max_body_count)
            self._body_frame_data_type = ctypes.POINTER(IBody) * self._body_frame_data_capacity.value
            self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody)))
            self._body_frame_reader = self._body_source.OpenReader()
            self._body_frame_arrived_event = self._body_frame_reader.SubscribeFrameArrived()
            self._body_frame_bodies = None
            self._handles[self._waitHandleCount] = self._body_frame_arrived_event
            self._waitHandleCount += 1

        thread.start_new_thread(self.kinect_frame_thread, ())

        self._last_color_frame = None
        self._last_depth_frame = None
        self._last_body_frame = None
        self._last_body_index_frame = None
        self._last_infrared_frame = None
        self._last_long_exposure_infrared_frame = None
        self._last_audio_frame = None

        # NOTE(review): time.clock() was removed in Python 3.8 -- this module
        # only runs on Python < 3.8 as written (time.perf_counter is the
        # usual replacement, but all clock call sites must change together
        # since the has_new_* checks compare these timestamps).
        start_clock = time.clock()
        self._last_color_frame_access = self._last_color_frame_time = start_clock
        self._last_body_frame_access = self._last_body_frame_time = start_clock
        self._last_body_index_frame_access = self._last_body_index_frame_time = start_clock
        self._last_depth_frame_access = self._last_depth_frame_time = start_clock
        self._last_infrared_frame_access = self._last_infrared_frame_time = start_clock
        self._last_long_exposure_infrared_frame_access = self._last_long_exposure_infrared_frame_time = start_clock
        self._last_audio_frame_access = self._last_audio_frame_time = start_clock
def close(self):
if self._sensor is not None:
ctypes.windll.kernel32.SetEvent(self._close_event)
ctypes.windll.kernel32.CloseHandle(self._close_event)
self._color_frame_reader = None
self._depth_frame_reader = None
self._body_index_frame_reader = None
self._body_frame_reader = None
self._color_source = None
self._depth_source = None
self._body_index_source = None
self._body_source = None
self._body_frame_data = None
self._sensor.Close()
self._sensor = None
    def __del__(self):
        # Ensure native sensor resources are released when the object is collected.
        self.close()
    def __enter__(self):
        # Context-manager support: `with runtime as r:` yields the runtime itself.
        return self
    def __exit__(self, *args):
        # Close the sensor when leaving a `with` block (exceptions propagate).
        self.close()
def surface_as_array(self, surface_buffer_interface):
address = ctypes.c_void_p()
size = self.Py_ssize_t()
self._PyObject_AsWriteBuffer(surface_buffer_interface,
ctypes.byref(address), ctypes.byref(size))
bytes = (ctypes.c_byte * size.value).from_address(address.value)
bytes.object = surface_buffer_interface
return bytes
def has_new_color_frame(self):
has = (self._last_color_frame_time > self._last_color_frame_access)
return has
def has_new_depth_frame(self):
has = (self._last_depth_frame_time > self._last_depth_frame_access)
return has
def has_new_body_frame(self):
has = (self._last_body_frame_time > self._last_body_frame_access)
return has
def has_new_body_index_frame(self):
has = (self._last_body_index_frame_time > self._last_body_index_frame_access)
return has
def has_new_infrared_frame(self):
has = (self._last_infrared_frame_time > self._last_infrared_frame_access)
return has
def has_new_long_exposure_infrared_frame(self):
has = (self._last_long_exposure_infrared_frame_time > self._last_long_exposure_infrared_frame_access)
return has
def has_new_audio_frame(self):
has = (self._last_audio_frame_time > self._last_audio_frame_access)
return has
def get_last_color_frame(self):
with self._color_frame_lock:
if self._color_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._color_frame_data, shape=(self._color_frame_data_capacity.value,)))
self._last_color_frame_access = time.clock()
return data
else:
return None
def get_last_infrared_frame(self):
with self._infrared_frame_lock:
if self._infrared_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._infrared_frame_data, shape=(self._infrared_frame_data_capacity.value,)))
self._last_color_frame_access = time.clock()
return data
else:
return None
def get_last_depth_frame(self):
with self._depth_frame_lock:
if self._depth_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._depth_frame_data, shape=(self._depth_frame_data_capacity.value,)))
self._last_color_frame_access = time.clock()
return data,self._depth_frame_data
else:
return None
def get_last_body_index_frame(self):
with self._body_index_frame_lock:
if self._body_index_frame_data is not None:
data = numpy.copy(numpy.ctypeslib.as_array(self._body_index_frame_data, shape=(self._body_index_frame_data_capacity.value,)))
self._last_color_frame_access = time.clock()
return data
else:
return None
def get_last_body_frame(self):
with self._body_frame_lock:
if self._body_frame_bodies is not None:
self._last_body_frame_access = time.clock()
return self._body_frame_bodies.copy()
else:
return None
    def get_Table(self,num):### for test only
        # Depth-pixel -> camera-space lookup table from the coordinate mapper.
        return self._mapper.GetDepthFrameToCameraSpaceTable(num)
    def get_intrinsic(self):
        # Depth-camera intrinsic parameters from the coordinate mapper.
        return self._mapper.GetDepthCameraIntrinsics()
    def body_joint_to_color_space(self, joint):
        # Project one camera-space joint position into color-image coordinates.
        return self._mapper.MapCameraPointToColorSpace(joint.Position)
    def body_joint_to_depth_space(self, joint):
        # Project one camera-space joint position into depth-image coordinates.
        return self._mapper.MapCameraPointToDepthSpace(joint.Position)
    def testtable(self):
        # Debug helper: fetch the depth->camera lookup table.
        # NOTE(review): called with no argument here while get_Table passes
        # `num` — confirm the mapper accepts a no-argument call.
        #pdb.set_trace()
        self._mapper.GetDepthFrameToCameraSpaceTable()
def body_joints_to_color_space(self, joints):
joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=numpy.object)
for j in range(0, PyKinectV2.JointType_Count):
joint_points[j] = self.body_joint_to_color_space(joints[j])
return joint_points
def body_joints_to_depth_space(self, joints):
joint_points = numpy.ndarray((PyKinectV2.JointType_Count), dtype=numpy.object)
for j in range(0, PyKinectV2.JointType_Count):
joint_points[j] = self.body_joint_to_depth_space(joints[j])
return joint_points
# def depthpt_to_color_pt(self,pt):
# return self._mapper.MapDepthPointToColorSpace(joint.Position)
#def depthpts_to_color_pts(self,pts):
def kinect_frame_thread(self):
while 1:
wait = ctypes.windll.kernel32.WaitForMultipleObjects(self._waitHandleCount, self._handles, False, PyKinectV2._INFINITE)
if wait == 0:
break
if self._handles[wait] == self._color_frame_arrived_event:
self.handle_color_arrived(wait)
elif self._handles[wait] == self._depth_frame_arrived_event:
self.handle_depth_arrived(wait)
elif self._handles[wait] == self._body_frame_arrived_event:
self.handle_body_arrived(wait)
elif self._handles[wait] == self._body_index_frame_arrived_event:
self.handle_body_index_arrived(wait)
elif self._handles[wait] == self._infrared_frame_arrived_event:
self.handle_infrared_arrived(wait)
elif self._handles[wait] == self._long_exposure_infrared_frame_arrived_event:
self.handle_long_exposure_infrared_arrived(wait)
elif self._handles[wait] == self._audio_frame_arrived_event:
self.handle_audio_arrived(wait)
else:
break
def handle_color_arrived(self, handle_index):
colorFrameEventData = self._color_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
colorFrameRef = colorFrameEventData.FrameReference
try:
colorFrame = colorFrameRef.AcquireFrame()
try:
with self._color_frame_lock:
colorFrame.CopyConvertedFrameDataToArray(self._color_frame_data_capacity, self._color_frame_data, PyKinectV2.ColorImageFormat_Bgra)
self._last_color_frame_time = time.clock()
except:
pass
colorFrame = None
except:
pass
colorFrameRef = None
colorFrameEventData = None
def handle_depth_arrived(self, handle_index):
depthFrameEventData = self._depth_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
depthFrameRef = depthFrameEventData.FrameReference
try:
depthFrame = depthFrameRef.AcquireFrame()
try:
with self._depth_frame_lock:
depthFrame.CopyFrameDataToArray(self._depth_frame_data_capacity, self._depth_frame_data)
self._last_depth_frame_time = time.clock()
except:
pass
depthFrame = None
except:
pass
depthFrameRef = None
depthFrameEventData = None
def handle_body_arrived(self, handle_index):
bodyFrameEventData = self._body_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
bofyFrameRef = bodyFrameEventData.FrameReference
try:
bodyFrame = bofyFrameRef.AcquireFrame()
try:
with self._body_frame_lock:
bodyFrame.GetAndRefreshBodyData(self._body_frame_data_capacity, self._body_frame_data)
self._body_frame_bodies = KinectBodyFrameData(bodyFrame, self._body_frame_data, self.max_body_count)
self._last_body_frame_time = time.clock()
# need these 2 lines as a workaround for handling IBody referencing exception
self._body_frame_data = None
self._body_frame_data = ctypes.cast(self._body_frame_data_type(), ctypes.POINTER(ctypes.POINTER(IBody)))
except:
pass
bodyFrame = None
except:
pass
bofyFrameRef = None
bodyFrameEventData = None
def handle_body_index_arrived(self, handle_index):
bodyIndexFrameEventData = self._body_index_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
bodyIndexFrameRef = bodyIndexFrameEventData.FrameReference
try:
bodyIndexFrame = bodyIndexFrameRef.AcquireFrame()
try:
with self._body_index_frame_lock:
bodyIndexFrame.CopyFrameDataToArray(self._body_index_frame_data_capacity, self._body_index_frame_data)
self._last_body_index_frame_time = time.clock()
except:
pass
bodyIndexFrame = None
except:
pass
bodyIndexFrame = None
bodyIndexFrameEventData = None
def handle_infrared_arrived(self, handle_index):
infraredFrameEventData = self._infrared_frame_reader.GetFrameArrivedEventData(self._handles[handle_index])
infraredFrameRef = infraredFrameEventData.FrameReference
try:
infraredFrame = infraredFrameRef.AcquireFrame()
try:
with self._infrared_frame_lock:
infraredFrame.CopyFrameDataToArray(self._infrared_frame_data_capacity, self._infrared_frame_data)
self._last_infrared_frame_time = time.clock()
except:
pass
infraredFrame = None
except:
pass
infraredFrameRef = None
infraredFrameEventData = None
    def handle_long_exposure_infrared_arrived(self, handle_index):
        # Not implemented: long-exposure infrared frames are ignored.
        pass
    def handle_audio_arrived(self, handle_index):
        # Not implemented: audio frames are ignored.
        pass
class KinectBody(object):
    """Snapshot of one tracked body: scalar state, joints and orientations.

    With ``body=None`` (the default) the instance is an "untracked" stub:
    is_tracked False, is_restricted False, tracking_id -1.
    Fix: removed two dead stores that assigned ``ctypes.POINTER(...)`` types
    to ``joints``/``joint_orientations`` only to overwrite them immediately.
    """
    def __init__(self, body = None):
        self.is_restricted = False
        self.tracking_id = -1
        self.is_tracked = False
        if body is not None:
            self.is_tracked = body.IsTracked
            if self.is_tracked:
                # Copy scalar per-body state from the native object.
                self.is_restricted = body.IsRestricted
                self.tracking_id = body.TrackingId
                self.engaged = body.Engaged
                self.lean = body.Lean
                self.lean_tracking_state = body.LeanTrackingState
                self.hand_left_state = body.HandLeftState
                self.hand_left_confidence = body.HandLeftConfidence
                self.hand_right_state = body.HandRightState
                self.hand_right_confidence = body.HandRightConfidence
                self.clipped_edges = body.ClippedEdges
                # Allocate a _Joint array and let the runtime fill it in.
                joints_capacity = ctypes.c_uint(PyKinectV2.JointType_Count)
                joints_data_type = PyKinectV2._Joint * joints_capacity.value
                joints = ctypes.cast(joints_data_type(), ctypes.POINTER(PyKinectV2._Joint))
                body.GetJoints(PyKinectV2.JointType_Count, joints)
                self.joints = joints
                # Same scheme for per-joint orientations.
                joint_orientations_data_type = PyKinectV2._JointOrientation * joints_capacity.value
                joint_orientations = ctypes.cast(joint_orientations_data_type(), ctypes.POINTER(PyKinectV2._JointOrientation))
                body.GetJointOrientations(PyKinectV2.JointType_Count, joint_orientations)
                self.joint_orientations = joint_orientations
class KinectBodyFrameData(object):
    """Per-frame body data: floor clip plane, relative time and a
    numpy object array of KinectBody instances.

    Fixes: ``relative_time`` is now initialized to None so ``copy()`` on an
    instance built without a frame cannot raise AttributeError; ``dtype``
    uses the builtin ``object`` (the ``numpy.object`` alias was removed in
    NumPy 1.24).
    """
    def __init__(self, bodyFrame, body_frame_data, max_body_count):
        self.bodies = None
        self.floor_clip_plane = None
        self.relative_time = None
        if bodyFrame is not None:
            self.floor_clip_plane = bodyFrame.FloorClipPlane
            self.relative_time = bodyFrame.RelativeTime
            self.bodies = numpy.ndarray((max_body_count), dtype=object)
            for i in range(0, max_body_count):
                self.bodies[i] = KinectBody(body_frame_data[i])
    def copy(self):
        """Return a copy; the bodies array is copied, elements are shared."""
        res = KinectBodyFrameData(None, None, 0)
        res.floor_clip_plane = self.floor_clip_plane
        res.relative_time = self.relative_time
        res.bodies = numpy.copy(self.bodies)
        return res
| [
"numpy.copy",
"ctypes.byref",
"ctypes.POINTER",
"time.clock",
"ctypes.windll.kernel32.SetEvent",
"ctypes.c_uint",
"numpy.ctypeslib.as_array",
"thread.allocate",
"numpy.ndarray",
"ctypes.c_void_p",
"ctypes.windll.kernel32.WaitForMultipleObjects",
"thread.start_new_thread",
"ctypes.windll.kern... | [((1501, 1562), 'ctypes.windll.kernel32.CreateEventW', 'ctypes.windll.kernel32.CreateEventW', (['None', '(False)', '(False)', 'None'], {}), '(None, False, False, None)\n', (1536, 1562), False, 'import ctypes\n'), ((1932, 1949), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (1947, 1949), False, 'import thread\n'), ((1983, 2000), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (1998, 2000), False, 'import thread\n'), ((2033, 2050), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (2048, 2050), False, 'import thread\n'), ((2089, 2106), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (2104, 2106), False, 'import thread\n'), ((2143, 2160), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (2158, 2160), False, 'import thread\n'), ((2211, 2228), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (2226, 2228), False, 'import thread\n'), ((2262, 2279), 'thread.allocate', 'thread.allocate', ([], {}), '()\n', (2277, 2279), False, 'import thread\n'), ((8028, 8081), 'thread.start_new_thread', 'thread.start_new_thread', (['self.kinect_frame_thread', '()'], {}), '(self.kinect_frame_thread, ())\n', (8051, 8081), False, 'import thread\n'), ((8396, 8408), 'time.clock', 'time.clock', ([], {}), '()\n', (8406, 8408), False, 'import time, pdb\n'), ((9885, 9902), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (9900, 9902), False, 'import ctypes\n'), ((13734, 13795), 'numpy.ndarray', 'numpy.ndarray', (['PyKinectV2.JointType_Count'], {'dtype': 'numpy.object'}), '(PyKinectV2.JointType_Count, dtype=numpy.object)\n', (13747, 13795), False, 'import numpy\n'), ((14029, 14090), 'numpy.ndarray', 'numpy.ndarray', (['PyKinectV2.JointType_Count'], {'dtype': 'numpy.object'}), '(PyKinectV2.JointType_Count, dtype=numpy.object)\n', (14042, 14090), False, 'import numpy\n'), ((22226, 22249), 'numpy.copy', 'numpy.copy', (['self.bodies'], {}), '(self.bodies)\n', (22236, 22249), False, 'import numpy\n'), ((954, 985), 
'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_void_p'], {}), '(ctypes.c_void_p)\n', (968, 985), False, 'import ctypes\n'), ((1029, 1060), 'ctypes.POINTER', 'ctypes.POINTER', (['self.Py_ssize_t'], {}), '(self.Py_ssize_t)\n', (1043, 1060), False, 'import ctypes\n'), ((2331, 2371), 'ctypes.POINTER', 'ctypes.POINTER', (['PyKinectV2.IKinectSensor'], {}), '(PyKinectV2.IKinectSensor)\n', (2345, 2371), False, 'import ctypes\n'), ((2435, 2461), 'ctypes.byref', 'ctypes.byref', (['self._sensor'], {}), '(self._sensor)\n', (2447, 2461), False, 'import ctypes\n'), ((3762, 3783), 'ctypes.POINTER', 'ctypes.POINTER', (['IBody'], {}), '(IBody)\n', (3776, 3783), False, 'import ctypes\n'), ((4265, 4295), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ubyte'], {}), '(ctypes.c_ubyte)\n', (4279, 4295), False, 'import ctypes\n'), ((4343, 4420), 'ctypes.c_uint', 'ctypes.c_uint', (['(self.color_frame_desc.Width * self.color_frame_desc.Height * 4)'], {}), '(self.color_frame_desc.Width * self.color_frame_desc.Height * 4)\n', (4356, 4420), False, 'import ctypes\n'), ((5024, 5055), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ushort'], {}), '(ctypes.c_ushort)\n', (5038, 5055), False, 'import ctypes\n'), ((5106, 5185), 'ctypes.c_uint', 'ctypes.c_uint', (['(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height)'], {}), '(self.infrared_frame_desc.Width * self.infrared_frame_desc.Height)\n', (5119, 5185), False, 'import ctypes\n'), ((5824, 5855), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ushort'], {}), '(ctypes.c_ushort)\n', (5838, 5855), False, 'import ctypes\n'), ((5903, 5976), 'ctypes.c_uint', 'ctypes.c_uint', (['(self.depth_frame_desc.Width * self.depth_frame_desc.Height)'], {}), '(self.depth_frame_desc.Width * self.depth_frame_desc.Height)\n', (5916, 5976), False, 'import ctypes\n'), ((6585, 6615), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ubyte'], {}), '(ctypes.c_ubyte)\n', (6599, 6615), False, 'import ctypes\n'), ((6668, 6756), 'ctypes.c_uint', 'ctypes.c_uint', 
(['(self.body_index_frame_desc.Width * self.body_index_frame_desc.Height)'], {}), '(self.body_index_frame_desc.Width * self.body_index_frame_desc\n .Height)\n', (6681, 6756), False, 'import ctypes\n'), ((7439, 7473), 'ctypes.c_uint', 'ctypes.c_uint', (['self.max_body_count'], {}), '(self.max_body_count)\n', (7452, 7473), False, 'import ctypes\n'), ((9102, 9152), 'ctypes.windll.kernel32.SetEvent', 'ctypes.windll.kernel32.SetEvent', (['self._close_event'], {}), '(self._close_event)\n', (9133, 9152), False, 'import ctypes\n'), ((9165, 9218), 'ctypes.windll.kernel32.CloseHandle', 'ctypes.windll.kernel32.CloseHandle', (['self._close_event'], {}), '(self._close_event)\n', (9199, 9218), False, 'import ctypes\n'), ((10027, 10048), 'ctypes.byref', 'ctypes.byref', (['address'], {}), '(address)\n', (10039, 10048), False, 'import ctypes\n'), ((10050, 10068), 'ctypes.byref', 'ctypes.byref', (['size'], {}), '(size)\n', (10062, 10068), False, 'import ctypes\n'), ((14501, 14618), 'ctypes.windll.kernel32.WaitForMultipleObjects', 'ctypes.windll.kernel32.WaitForMultipleObjects', (['self._waitHandleCount', 'self._handles', '(False)', 'PyKinectV2._INFINITE'], {}), '(self._waitHandleCount, self.\n _handles, False, PyKinectV2._INFINITE)\n', (14546, 14618), False, 'import ctypes\n'), ((20707, 20740), 'ctypes.POINTER', 'ctypes.POINTER', (['PyKinectV2._Joint'], {}), '(PyKinectV2._Joint)\n', (20721, 20740), False, 'import ctypes\n'), ((20784, 20825), 'ctypes.c_uint', 'ctypes.c_uint', (['PyKinectV2.JointType_Count'], {}), '(PyKinectV2.JointType_Count)\n', (20797, 20825), False, 'import ctypes\n'), ((21117, 21161), 'ctypes.POINTER', 'ctypes.POINTER', (['PyKinectV2._JointOrientation'], {}), '(PyKinectV2._JointOrientation)\n', (21131, 21161), False, 'import ctypes\n'), ((21873, 21922), 'numpy.ndarray', 'numpy.ndarray', (['max_body_count'], {'dtype': 'numpy.object'}), '(max_body_count, dtype=numpy.object)\n', (21886, 21922), False, 'import numpy\n'), ((4598, 4628), 'ctypes.POINTER', 
'ctypes.POINTER', (['ctypes.c_ubyte'], {}), '(ctypes.c_ubyte)\n', (4612, 4628), False, 'import ctypes\n'), ((5376, 5407), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ushort'], {}), '(ctypes.c_ushort)\n', (5390, 5407), False, 'import ctypes\n'), ((6155, 6186), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ushort'], {}), '(ctypes.c_ushort)\n', (6169, 6186), False, 'import ctypes\n'), ((6949, 6979), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_ubyte'], {}), '(ctypes.c_ubyte)\n', (6963, 6979), False, 'import ctypes\n'), ((7515, 7536), 'ctypes.POINTER', 'ctypes.POINTER', (['IBody'], {}), '(IBody)\n', (7529, 7536), False, 'import ctypes\n'), ((11504, 11516), 'time.clock', 'time.clock', ([], {}), '()\n', (11514, 11516), False, 'import time, pdb\n'), ((11911, 11923), 'time.clock', 'time.clock', ([], {}), '()\n', (11921, 11923), False, 'import time, pdb\n'), ((12303, 12315), 'time.clock', 'time.clock', ([], {}), '()\n', (12313, 12315), False, 'import time, pdb\n'), ((12743, 12755), 'time.clock', 'time.clock', ([], {}), '()\n', (12753, 12755), False, 'import time, pdb\n'), ((13001, 13013), 'time.clock', 'time.clock', ([], {}), '()\n', (13011, 13013), False, 'import time, pdb\n'), ((20952, 20985), 'ctypes.POINTER', 'ctypes.POINTER', (['PyKinectV2._Joint'], {}), '(PyKinectV2._Joint)\n', (20966, 20985), False, 'import ctypes\n'), ((21335, 21379), 'ctypes.POINTER', 'ctypes.POINTER', (['PyKinectV2._JointOrientation'], {}), '(PyKinectV2._JointOrientation)\n', (21349, 21379), False, 'import ctypes\n'), ((7669, 7690), 'ctypes.POINTER', 'ctypes.POINTER', (['IBody'], {}), '(IBody)\n', (7683, 7690), False, 'import ctypes\n'), ((11358, 11459), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', (['self._color_frame_data'], {'shape': '(self._color_frame_data_capacity.value,)'}), '(self._color_frame_data, shape=(self.\n _color_frame_data_capacity.value,))\n', (11382, 11459), False, 'import numpy\n'), ((11759, 11866), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', 
(['self._infrared_frame_data'], {'shape': '(self._infrared_frame_data_capacity.value,)'}), '(self._infrared_frame_data, shape=(self.\n _infrared_frame_data_capacity.value,))\n', (11783, 11866), False, 'import numpy\n'), ((12157, 12258), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', (['self._depth_frame_data'], {'shape': '(self._depth_frame_data_capacity.value,)'}), '(self._depth_frame_data, shape=(self.\n _depth_frame_data_capacity.value,))\n', (12181, 12258), False, 'import numpy\n'), ((12587, 12698), 'numpy.ctypeslib.as_array', 'numpy.ctypeslib.as_array', (['self._body_index_frame_data'], {'shape': '(self._body_index_frame_data_capacity.value,)'}), '(self._body_index_frame_data, shape=(self.\n _body_index_frame_data_capacity.value,))\n', (12611, 12698), False, 'import numpy\n'), ((16263, 16275), 'time.clock', 'time.clock', ([], {}), '()\n', (16273, 16275), False, 'import time, pdb\n'), ((16953, 16965), 'time.clock', 'time.clock', ([], {}), '()\n', (16963, 16965), False, 'import time, pdb\n'), ((17756, 17768), 'time.clock', 'time.clock', ([], {}), '()\n', (17766, 17768), False, 'import time, pdb\n'), ((18788, 18800), 'time.clock', 'time.clock', ([], {}), '()\n', (18798, 18800), False, 'import time, pdb\n'), ((19522, 19534), 'time.clock', 'time.clock', ([], {}), '()\n', (19532, 19534), False, 'import time, pdb\n'), ((18007, 18028), 'ctypes.POINTER', 'ctypes.POINTER', (['IBody'], {}), '(IBody)\n', (18021, 18028), False, 'import ctypes\n')] |
import tensorflow as tf
import time
import os
import numpy as np
import matplotlib.pyplot as plt
class Gan(object):
    """DCGAN for size x size grayscale images (TensorFlow 1.x eager mode).

    Builds the generator/discriminator sub-models, Adam optimizers,
    checkpointing, and a defun-compiled training step.
    Fix: ``generate_and_save_images`` sized its output grid with a
    hard-coded 8 while the fill loop uses ``self.num_samples``; the grid
    now uses ``self.num_samples`` so the two always agree.
    """

    def __init__(self):
        super(Gan, self).__init__()
        # Data constants
        self.size = 32
        self.channels = 1
        self.latent_size = 128
        self.depth = 32
        self.train_dir = os.path.join('train', 'gan')
        # Optimizer vars
        self.learning_rate = 0.0001
        self.beta1 = 0.5
        # Fixed latent batch for sampling a num_samples x num_samples grid.
        self.num_samples = 8
        self.z_sample = tf.random_normal(
            [self.num_samples**2, self.latent_size]
        )
        # Choose optimizers
        self.generator_optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate, beta1=self.beta1
        )
        self.discriminator_optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate, beta1=self.beta1
        )
        # Make model
        self.generator = self.Generator(
            self.depth, self.size, self.channels, self.latent_size
        )
        self.discriminator = self.Discriminator(self.depth, self.size)
        # Setup checkpoint
        self.checkpoint_dir = os.path.join('checkpoint', 'gan')
        self.checkpoint_prefix = os.path.join(self.checkpoint_dir, "ckpt")
        self.checkpoint = tf.train.Checkpoint(
            generator=self.generator, discriminator=self.discriminator
        )
        # Compile the eager training step into a graph for speed.
        self.train_step = tf.contrib.eager.defun(self.train_step)

    class Generator(tf.keras.Model):
        """Transposed-conv generator: latent vector -> size x size image.

        Exposes intermediate activations as g1/g2/g3 after each call.
        """

        def __init__(self, depth, size, channels, latent_size):
            super(Gan.Generator, self).__init__()
            # Architecture params
            self.depth = depth
            self.size = size
            self.channels = channels
            self.latent_size = latent_size
            # Weighted layers
            self.fc1 = tf.keras.layers.Dense(
                int(4*self.depth*int(self.size/8)*int(self.size/8)),
                use_bias=False, input_shape=(self.latent_size,)
            )
            self.batchnorm1 = tf.keras.layers.BatchNormalization()
            self.conv2 = tf.keras.layers.Conv2DTranspose(
                2*self.depth, (4, 4), strides=(2, 2),
                padding='same', use_bias=False
            )
            self.batchnorm2 = tf.keras.layers.BatchNormalization()
            self.conv3 = tf.keras.layers.Conv2DTranspose(
                self.depth, (4, 4), strides=(2, 2),
                padding='same', use_bias=False
            )
            self.batchnorm3 = tf.keras.layers.BatchNormalization()
            self.conv4 = tf.keras.layers.Conv2DTranspose(
                self.channels, (4, 4), strides=(2, 2), padding='same',
                use_bias=False, activation='tanh'
            )

        def call(self, x, training=True):
            """Map a latent batch to images in [-1, 1] (tanh output)."""
            # Layer 1: LATENT_SIZE -> SIZE*SIZE*DEPTH/16
            x = self.fc1(x)
            x = self.batchnorm1(x, training=training)
            x = tf.keras.layers.LeakyReLU()(x)
            # Reshape: SIZE*SIZE*DEPTH/16 -> SIZE/8 x SIZE/8 x 4*DEPTH
            x = tf.keras.layers.Reshape(
                (int(self.size/8), int(self.size/8), 4*self.depth)
            )(x)
            self.g1 = x
            # Layer 2: SIZE/8 x SIZE/8 x 4*DEPTH -> SIZE/4 x SIZE/4 x 2*DEPTH
            x = self.conv2(x)
            x = self.batchnorm2(x, training=training)
            x = tf.keras.layers.LeakyReLU()(x)
            self.g2 = x
            # Layer 3: SIZE/4 x SIZE/4 x 2*DEPTH -> SIZE/2 x SIZE/2 x DEPTH
            x = self.conv3(x)
            x = self.batchnorm3(x, training=training)
            x = tf.keras.layers.LeakyReLU()(x)
            self.g3 = x
            # Layer 4: SIZE/2 x SIZE/2 x DEPTH -> SIZE x SIZE x CHANNELS
            x = self.conv4(x)
            return x

    class Discriminator(tf.keras.Model):
        """Strided-conv discriminator: image -> single real/fake logit.

        Exposes intermediate activations as d1/d2/d3 after each call.
        """

        def __init__(self, depth, size):
            super(Gan.Discriminator, self).__init__()
            # Architecture params
            self.depth = depth
            self.size = size
            # Weighted layers
            self.conv1 = tf.keras.layers.Conv2D(
                self.depth, (4, 4), strides=(2, 2), padding='same'
            )
            self.batchnorm1 = tf.keras.layers.BatchNormalization()
            self.conv2 = tf.keras.layers.Conv2D(
                2*self.depth, (4, 4), strides=(2, 2), padding='same'
            )
            self.batchnorm2 = tf.keras.layers.BatchNormalization()
            self.conv3 = tf.keras.layers.Conv2D(
                4*self.depth, (4, 4), strides=(2, 2), padding='same'
            )
            self.batchnorm3 = tf.keras.layers.BatchNormalization()
            self.fc4 = tf.keras.layers.Dense(1)

        def call(self, x, training=True):
            """Map an image batch to unnormalized real/fake logits."""
            # Layer 1: SIZE x SIZE x CHANNELS -> SIZE/2 x SIZE/2 x DEPTH
            x = self.conv1(x)
            x = self.batchnorm1(x, training=training)
            x = tf.keras.layers.LeakyReLU()(x)
            self.d1 = x
            # Layer 2: SIZE/2 x SIZE/2 x DEPTH -> SIZE/4 x SIZE/4 x 2*DEPTH
            x = self.conv2(x)
            x = self.batchnorm2(x, training=training)
            x = tf.keras.layers.LeakyReLU()(x)
            self.d2 = x
            # Layer 3: SIZE/4 x SIZE/4 x 2*DEPTH -> SIZE/8 x SIZE/8 x 4*DEPTH
            x = self.conv3(x)
            x = self.batchnorm3(x, training=training)
            x = tf.keras.layers.LeakyReLU()(x)
            self.d3 = x
            # Reshape: SIZE/8 x SIZE/8 x 4*DEPTH -> SIZE*SIZE*DEPTH/16
            x = tf.keras.layers.Flatten()(x)
            # Layer 4: SIZE*SIZE*DEPTH/16 -> 1
            x = self.fc4(x)
            return x

    def generator_loss(self, generated_output):
        """Cross-entropy against all-ones: generator wants fakes called real."""
        return tf.losses.sigmoid_cross_entropy(
            tf.ones_like(generated_output), generated_output
        )

    def discriminator_loss(self, real_output, generated_output):
        """Cross-entropy: real labeled 1, generated labeled 0; sum of both."""
        # [1,1,...,1] with real output since it is true
        real_loss = tf.losses.sigmoid_cross_entropy(
            multi_class_labels=tf.ones_like(real_output), logits=real_output
        )
        # [0,0,...,0] with generated images since they are fake
        generated_loss = tf.losses.sigmoid_cross_entropy(
            multi_class_labels=tf.zeros_like(generated_output),
            logits=generated_output
        )
        total_loss = real_loss + generated_loss
        return total_loss

    def train_step(self, images):
        """One adversarial update: compute both losses and apply gradients."""
        # generating latent code from a normal distribution
        latent_code = tf.random_normal(
            [tf.shape(images)[0], self.latent_size]
        )
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Generate images
            generated_images = self.generator(latent_code, training=True)
            # Dicriminate real from false
            real_output = self.discriminator(images, training=True)
            generated_output = self.discriminator(
                generated_images, training=True
            )
            # Compute loss
            gen_loss = self.generator_loss(generated_output)
            disc_loss = self.discriminator_loss(real_output, generated_output)
        # Compute gradients
        gradients_of_generator = gen_tape.gradient(
            gen_loss, self.generator.variables
        )
        gradients_of_discriminator = disc_tape.gradient(
            disc_loss, self.discriminator.variables
        )
        # Update weights
        self.generator_optimizer.apply_gradients(zip(
            gradients_of_generator, self.generator.variables
        ))
        self.discriminator_optimizer.apply_gradients(zip(
            gradients_of_discriminator, self.discriminator.variables
        ))

    def generate_and_save_images(self, epoch, test_input):
        """Render a num_samples x num_samples grid of samples to a PNG.

        Only channel 0 is plotted (single-channel model).
        """
        predictions = self.generator(test_input, training=False)
        # Map tanh output [-1, 1] back to pixel range [0, 255].
        images = predictions * 127.5 + 127.5
        # Fix: grid side was hard-coded to 8; derive it from num_samples so
        # the buffer always matches the fill loop below.
        grid = self.size * self.num_samples
        image = np.zeros((grid, grid))
        for i in range(self.num_samples**2):
            image[
                int(i / self.num_samples)*self.size:
                int(i / self.num_samples + 1)*self.size,
                (i % self.num_samples)*self.size:
                (i % self.num_samples + 1)*self.size
            ] = images[i, :, :, 0]
        path = os.path.join(
            self.train_dir, 'image_at_epoch_{:04d}.png'.format(epoch)
        )
        plt.imsave(fname=path, arr=image, cmap='gray')

    def train(self, dataset, epochs):
        """Main loop: restore latest checkpoint, then train and checkpoint
        once per epoch, saving a sample grid each time."""
        # Restore the latest checkpoint
        self.checkpoint.restore(
            tf.train.latest_checkpoint(self.checkpoint_dir)
        )
        for epoch in range(epochs):
            start = time.time()
            # Update weights
            for images in dataset:
                self.train_step(images)
            # Save images for checking
            self.generate_and_save_images(epoch + 1, self.z_sample)
            # saving (checkpoint) the model every epoch
            self.checkpoint.save(file_prefix=self.checkpoint_prefix)
            # Print time
            print('Time taken for training epoch {} is {} sec'.format(
                epoch + 1, time.time()-start
            ))
        # Save images for checking
        self.generate_and_save_images(epoch + 1, self.z_sample)

    def layers(self, images):
        """Return intermediate activations for real and generated inputs.

        Output order: (dr1, dr2, dr3, g1, g2, g3, generated_images,
        dg1, dg2, dg3) as numpy arrays.
        """
        number = images.shape[0]
        # Restore the latest checkpoint
        self.checkpoint.restore(
            tf.train.latest_checkpoint(self.checkpoint_dir)
        )
        # Discriminator layers for real images
        images = tf.convert_to_tensor(images, dtype=tf.float32)
        _ = self.discriminator(images, training=False)
        dr1 = self.discriminator.d1.numpy()
        dr2 = self.discriminator.d2.numpy()
        dr3 = self.discriminator.d3.numpy()
        # Random sample from latent space
        latent_code = tf.random_normal(
            [number, self.latent_size]
        )
        # Generator layers
        g4 = self.generator(latent_code, training=False)
        g1 = self.generator.g1.numpy()
        g2 = self.generator.g2.numpy()
        g3 = self.generator.g3.numpy()
        # Discriminator layers for fake images
        _ = self.discriminator(g4, training=False)
        dg1 = self.discriminator.d1.numpy()
        dg2 = self.discriminator.d2.numpy()
        dg3 = self.discriminator.d3.numpy()
        return dr1, dr2, dr3, g1, g2, g3, g4.numpy(), dg1, dg2, dg3
| [
"tensorflow.train.Checkpoint",
"tensorflow.shape",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.ones_like",
"tensorflow.random_normal",
"tensorflow.keras.layers.Conv2D",
"tensorflow.zeros_like",
"tensorflow.convert_to_tensor"... | [((333, 361), 'os.path.join', 'os.path.join', (['"""train"""', '"""gan"""'], {}), "('train', 'gan')\n", (345, 361), False, 'import os\n'), ((539, 598), 'tensorflow.random_normal', 'tf.random_normal', (['[self.num_samples ** 2, self.latent_size]'], {}), '([self.num_samples ** 2, self.latent_size])\n', (555, 598), True, 'import tensorflow as tf\n'), ((683, 757), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate', 'beta1': 'self.beta1'}), '(learning_rate=self.learning_rate, beta1=self.beta1)\n', (705, 757), True, 'import tensorflow as tf\n'), ((819, 893), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate', 'beta1': 'self.beta1'}), '(learning_rate=self.learning_rate, beta1=self.beta1)\n', (841, 893), True, 'import tensorflow as tf\n'), ((1185, 1218), 'os.path.join', 'os.path.join', (['"""checkpoint"""', '"""gan"""'], {}), "('checkpoint', 'gan')\n", (1197, 1218), False, 'import os\n'), ((1252, 1293), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""ckpt"""'], {}), "(self.checkpoint_dir, 'ckpt')\n", (1264, 1293), False, 'import os\n'), ((1320, 1399), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'generator': 'self.generator', 'discriminator': 'self.discriminator'}), '(generator=self.generator, discriminator=self.discriminator)\n', (1339, 1399), True, 'import tensorflow as tf\n'), ((1477, 1516), 'tensorflow.contrib.eager.defun', 'tf.contrib.eager.defun', (['self.train_step'], {}), '(self.train_step)\n', (1499, 1516), True, 'import tensorflow as tf\n'), ((8052, 8092), 'numpy.zeros', 'np.zeros', (['(self.size * 8, self.size * 8)'], {}), '((self.size * 8, self.size * 8))\n', (8060, 8092), True, 'import numpy as np\n'), ((8519, 8565), 'matplotlib.pyplot.imsave', 'plt.imsave', ([], {'fname': 'path', 'arr': 'image', 'cmap': '"""gray"""'}), "(fname=path, arr=image, cmap='gray')\n", (8529, 8565), True, 'import 
matplotlib.pyplot as plt\n'), ((9702, 9748), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['images'], {'dtype': 'tf.float32'}), '(images, dtype=tf.float32)\n', (9722, 9748), True, 'import tensorflow as tf\n'), ((10001, 10045), 'tensorflow.random_normal', 'tf.random_normal', (['[number, self.latent_size]'], {}), '([number, self.latent_size])\n', (10017, 10045), True, 'import tensorflow as tf\n'), ((2098, 2134), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2132, 2134), True, 'import tensorflow as tf\n'), ((2161, 2268), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(2 * self.depth)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(2 * self.depth, (4, 4), strides=(2, 2),\n padding='same', use_bias=False)\n", (2192, 2268), True, 'import tensorflow as tf\n'), ((2339, 2375), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2373, 2375), True, 'import tensorflow as tf\n'), ((2402, 2506), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['self.depth', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(self.depth, (4, 4), strides=(2, 2), padding\n ='same', use_bias=False)\n", (2433, 2506), True, 'import tensorflow as tf\n'), ((2578, 2614), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2612, 2614), True, 'import tensorflow as tf\n'), ((2641, 2766), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['self.channels', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)', 'activation': '"""tanh"""'}), "(self.channels, (4, 4), strides=(2, 2),\n padding='same', use_bias=False, activation='tanh')\n", (2672, 2766), True, 'import tensorflow as tf\n'), ((4139, 4213), 'tensorflow.keras.layers.Conv2D', 
'tf.keras.layers.Conv2D', (['self.depth', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(self.depth, (4, 4), strides=(2, 2), padding='same')\n", (4161, 4213), True, 'import tensorflow as tf\n'), ((4274, 4310), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (4308, 4310), True, 'import tensorflow as tf\n'), ((4337, 4415), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(2 * self.depth)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(2 * self.depth, (4, 4), strides=(2, 2), padding='same')\n", (4359, 4415), True, 'import tensorflow as tf\n'), ((4474, 4510), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (4508, 4510), True, 'import tensorflow as tf\n'), ((4537, 4615), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(4 * self.depth)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(4 * self.depth, (4, 4), strides=(2, 2), padding='same')\n", (4559, 4615), True, 'import tensorflow as tf\n'), ((4674, 4710), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (4708, 4710), True, 'import tensorflow as tf\n'), ((4735, 4759), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (4756, 4759), True, 'import tensorflow as tf\n'), ((5841, 5871), 'tensorflow.ones_like', 'tf.ones_like', (['generated_output'], {}), '(generated_output)\n', (5853, 5871), True, 'import tensorflow as tf\n'), ((6703, 6720), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6718, 6720), True, 'import tensorflow as tf\n'), ((6734, 6751), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6749, 6751), True, 'import tensorflow as tf\n'), ((8706, 8753), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (8732, 8753), True, 'import tensorflow as 
tf\n'), ((8821, 8832), 'time.time', 'time.time', ([], {}), '()\n', (8830, 8832), False, 'import time\n'), ((9579, 9626), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.checkpoint_dir'], {}), '(self.checkpoint_dir)\n', (9605, 9626), True, 'import tensorflow as tf\n'), ((3007, 3034), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (3032, 3034), True, 'import tensorflow as tf\n'), ((3438, 3465), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (3463, 3465), True, 'import tensorflow as tf\n'), ((3670, 3697), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (3695, 3697), True, 'import tensorflow as tf\n'), ((4976, 5003), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (5001, 5003), True, 'import tensorflow as tf\n'), ((5208, 5235), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (5233, 5235), True, 'import tensorflow as tf\n'), ((5442, 5469), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (5467, 5469), True, 'import tensorflow as tf\n'), ((5585, 5610), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (5608, 5610), True, 'import tensorflow as tf\n'), ((6106, 6131), 'tensorflow.ones_like', 'tf.ones_like', (['real_output'], {}), '(real_output)\n', (6118, 6131), True, 'import tensorflow as tf\n'), ((6316, 6347), 'tensorflow.zeros_like', 'tf.zeros_like', (['generated_output'], {}), '(generated_output)\n', (6329, 6347), True, 'import tensorflow as tf\n'), ((6640, 6656), 'tensorflow.shape', 'tf.shape', (['images'], {}), '(images)\n', (6648, 6656), True, 'import tensorflow as tf\n'), ((9296, 9307), 'time.time', 'time.time', ([], {}), '()\n', (9305, 9307), False, 'import time\n')] |
import numpy as np
import theano
import theano.tensor as T
import time
import argparse
import lasagne
import os
from lasagne import layers, regularization, nonlinearities
from load_dataset import DataLoader
from sklearn.metrics import confusion_matrix
from utils import *
import sys
IMAGE_SIZE = 256
BATCH_SIZE = 32
MOMENTUM = 0.9
MAX_EPOCH = 1
#LEARNING_RATE_SCHEDULE = dict(enumerate(np.logspace(-5.6, -10, MAX_EPOCH, base=2., dtype=theano.config.floatX)))
LEARNING_RATE_SCHEDULE = {
0: 0.02,
130: 0.01,
140: 0.005,
150: 0.002,
160: 0.001,
170: 0.0005,
180: 0.0002,
190: 0.0001,
}
if __name__ == '__main__':
#####################
# Get cmd arguments #
#####################
parser = argparse.ArgumentParser()
parser.add_argument("-n",
"--network",
type=str,
help="Path to the pickled network file")
parser.add_argument("-m",
"--model",
type=str,
default='',
help="Path to the file storing network configuration")
parser.add_argument("-e",
"--epochs",
type=int,
help="Number of epochs to train the network")
args = parser.parse_args()
print("Loading dataset...")
dloader = DataLoader(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, random_state=1106, train_path="train/trimmed256")
# for Rasim
#dloader = DataLoader(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, random_state=16, datadir="C:/workspace/projects/kaggle/retina-diabetic")
#####################
# Build the model #
#####################
if args.model:
execfile(args.model)
print("Built model:")
elif args.network:
all_layers, output = load_network(args.network)
print("Loaded network: ")
# if command-line argument was specified it overrides default and config MAX_EPOCH
if args.epochs:
MAX_EPOCH = args.epochs
print_network(all_layers)
# allocate symbolic variables for theano graph computations
batch_index = T.iscalar('batch_index')
X_batch = T.tensor4('x')
y_batch = T.fmatrix('y')
# allocate shared variables for images, labels and learing rate
x_shared = theano.shared(np.zeros((BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=theano.config.floatX),
borrow=True)
y_shared = theano.shared(np.zeros((BATCH_SIZE, 4), dtype=theano.config.floatX),
borrow=True)
learning_rate = theano.shared(np.float32(LEARNING_RATE_SCHEDULE[0]))
# use mse objective for regression
# objective = lasagne.objectives.MaskedObjective(output,
# loss_function=lasagne.objectives.mse,
# aggregation='sum')
objective = lasagne.objectives.Objective(output,
loss_function=lasagne.objectives.mse)
mask = np.array([1, 2, 3, 4], dtype=theano.config.floatX)
loss_train = objective.get_loss(X_batch, target=y_batch)
loss_eval = objective.get_loss(X_batch, target=y_batch,
deterministic=True)
# calculates actual predictions to determine weighted kappa
# http://www.kaggle.com/c/diabetic-retinopathy-detection/details/evaluation
#pred = T.argmax(output.get_output(X_batch, deterministic=True), axis=1)
probas = lasagne.layers.get_output(output, X_batch, deterministic=True)
pred = T.gt(probas, 0.5)
#pred = T.cast(output.get_output(X_batch, deterministic=True), 'int32').clip(0, 4)
# collect all model parameters
all_params = lasagne.layers.get_all_params(output)
# generate parameter updates for SGD with Nesterov momentum
updates = lasagne.updates.nesterov_momentum(
loss_train, all_params, learning_rate, MOMENTUM)
print("Compiling theano functions...")
# create theano functions for calculating losses on train and validation sets
iter_train = theano.function(
[], loss_train,
updates=updates,
givens={
X_batch: x_shared,
y_batch: y_shared,
},
)
iter_valid = theano.function(
[], [loss_eval, pred],
givens={
X_batch: x_shared,
y_batch: y_shared,
},
)
###################
# Actual training #
###################
# keep track of networks best performance and save net configuration
best_epoch = 0
best_valid = 1.
best_kappa = 0.
# epoch and iteration counters
epoch = 0
_iter = 0
# wait for at least this many epochs before saving the model
min_epochs = 0
# store these values for learning curves plotting
train_loss = []
valid_loss = []
kappa_loss = []
conf_mat = np.array([])
imgs_error = pd.Series([])
# wait for this many epochs if the validation error is not increasing
patience = 10
now = time.time()
print("| Epoch | Train err | Validation err | Weighted Kappa | Ratio | Time |")
print("|----------------------------------------------------------------------|")
try:
# get next chunks of data
while epoch < MAX_EPOCH:
if epoch in LEARNING_RATE_SCHEDULE:
learning_rate.set_value(LEARNING_RATE_SCHEDULE[epoch])
epoch += 1
# train the network on all chunks
batch_train_losses = []
for x_next, y_next in dloader.train_gen():
# perform forward pass and parameters update
if not len(x_next) == BATCH_SIZE:
continue
x_shared.set_value(lasagne.utils.floatX(x_next), borrow=True)
y_shared.set_value(y_next, borrow=True)
batch_train_loss = iter_train()
batch_train_losses.append(batch_train_loss)
#num_train_batches = int(np.ceil(len(x_next) / BATCH_SIZE))
avg_train_loss = np.mean(batch_train_losses)
# validate the network on validation chunks
batch_valid_losses = []
valid_predictions = []
# get prediction and error on validation set
#chunk_num = 0
for valid_x_next, valid_y_next in dloader.valid_gen():
# probas = np.zeros((4, valid_x_next.shape[0], 4), dtype=theano.config.floatX)
if not len(valid_x_next) == BATCH_SIZE:
continue
x_shared.set_value(lasagne.utils.floatX(valid_x_next), borrow=True)
y_shared.set_value(valid_y_next, borrow=True)
batch_valid_loss, prediction = iter_valid()
batch_valid_losses.append(batch_valid_loss)
valid_predictions.extend(get_predictions(prediction))
avg_valid_loss = np.mean(batch_valid_losses)
vp = np.array(valid_predictions)
#print valid_predictions
#print dloader.valid_labels
c_kappa = np.sum(valid_predictions == dloader.valid_labels.values) / float(len(dloader.valid_labels))
#kappa(dloader.valid_labels, vp)
print("|%6d | %9.6f | %14.6f | %14.5f | %1.3f | %6d |" %
(epoch,
avg_train_loss,
avg_valid_loss,
c_kappa,
avg_valid_loss / avg_train_loss,
time.time() - now))
# keep track of these for future analysis
train_loss.append(avg_train_loss)
valid_loss.append(avg_valid_loss)
kappa_loss.append(c_kappa)
# if this is the best kappa obtained so far
# save the model to make predictions on the test set
if c_kappa > best_kappa:
# always wait for min_epochs, to avoid frequent saving
# during early stages of learning
if epoch >= min_epochs:
save_network(all_layers)
conf_mat = confusion_matrix(dloader.valid_labels, valid_predictions)
imgs_error = make_predictions_series(valid_predictions, dloader.valid_images.values)
#imgs_error = images_byerror(valid_predictions, dloader.valid_labels.values, dloader.valid_images.values)
best_kappa = c_kappa
best_epoch = epoch
patience = 10
if (epoch % 10) == 0:
save_network(all_layers, filename='data/tidy/snapshot_%d.pickle' % epoch)
conf_mat = confusion_matrix(dloader.valid_labels, valid_predictions)
imgs_error = make_predictions_series(valid_predictions, dloader.valid_images.values)
results = np.array([train_loss, valid_loss, kappa_loss], dtype=np.float)
np.save("data/tidy/training_%d.npy" % epoch, results)
np.save("data/tidy/confusion_%d.npy" % epoch, conf_mat)
imgs_error.to_csv("data/tidy/imgs_error_%d.csv" % epoch)
else:
#decrease patience
patience -= 1
except KeyboardInterrupt:
print("Trainig interrupted on epoch %d" % epoch)
elapsed_time = time.time() - now
print("The best weighted quadratic kappa: %.5f obtained on epoch %d.\n The training took %d seconds." %
(best_kappa, best_epoch, elapsed_time))
print(" The average performance was %.1f images/sec" % (
(len(dloader.train_images) + len(dloader.valid_images)) * float(epoch) / elapsed_time))
results = np.array([train_loss, valid_loss, kappa_loss], dtype=np.float)
np.save("data/tidy/training.npy", results)
np.save("data/tidy/confusion.npy", conf_mat)
imgs_error.to_csv("data/tidy/imgs_error.csv")
# terminate background tasks
dloader.cleanup()
| [
"theano.tensor.gt",
"theano.tensor.iscalar",
"lasagne.updates.nesterov_momentum",
"lasagne.utils.floatX",
"numpy.array",
"load_dataset.DataLoader",
"numpy.save",
"lasagne.layers.get_all_params",
"numpy.mean",
"lasagne.objectives.Objective",
"argparse.ArgumentParser",
"theano.function",
"thea... | [((740, 765), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (763, 765), False, 'import argparse\n'), ((1394, 1504), 'load_dataset.DataLoader', 'DataLoader', ([], {'image_size': 'IMAGE_SIZE', 'batch_size': 'BATCH_SIZE', 'random_state': '(1106)', 'train_path': '"""train/trimmed256"""'}), "(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, random_state=1106,\n train_path='train/trimmed256')\n", (1404, 1504), False, 'from load_dataset import DataLoader\n'), ((2191, 2215), 'theano.tensor.iscalar', 'T.iscalar', (['"""batch_index"""'], {}), "('batch_index')\n", (2200, 2215), True, 'import theano.tensor as T\n'), ((2230, 2244), 'theano.tensor.tensor4', 'T.tensor4', (['"""x"""'], {}), "('x')\n", (2239, 2244), True, 'import theano.tensor as T\n'), ((2259, 2273), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""y"""'], {}), "('y')\n", (2268, 2273), True, 'import theano.tensor as T\n'), ((2980, 3054), 'lasagne.objectives.Objective', 'lasagne.objectives.Objective', (['output'], {'loss_function': 'lasagne.objectives.mse'}), '(output, loss_function=lasagne.objectives.mse)\n', (3008, 3054), False, 'import lasagne\n'), ((3111, 3161), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {'dtype': 'theano.config.floatX'}), '([1, 2, 3, 4], dtype=theano.config.floatX)\n', (3119, 3161), True, 'import numpy as np\n'), ((3582, 3644), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['output', 'X_batch'], {'deterministic': '(True)'}), '(output, X_batch, deterministic=True)\n', (3607, 3644), False, 'import lasagne\n'), ((3656, 3673), 'theano.tensor.gt', 'T.gt', (['probas', '(0.5)'], {}), '(probas, 0.5)\n', (3660, 3673), True, 'import theano.tensor as T\n'), ((3818, 3855), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['output'], {}), '(output)\n', (3847, 3855), False, 'import lasagne\n'), ((3934, 4020), 'lasagne.updates.nesterov_momentum', 'lasagne.updates.nesterov_momentum', (['loss_train', 'all_params', 'learning_rate', 'MOMENTUM'], {}), 
'(loss_train, all_params, learning_rate,\n MOMENTUM)\n', (3967, 4020), False, 'import lasagne\n'), ((4169, 4268), 'theano.function', 'theano.function', (['[]', 'loss_train'], {'updates': 'updates', 'givens': '{X_batch: x_shared, y_batch: y_shared}'}), '([], loss_train, updates=updates, givens={X_batch: x_shared,\n y_batch: y_shared})\n', (4184, 4268), False, 'import theano\n'), ((4356, 4445), 'theano.function', 'theano.function', (['[]', '[loss_eval, pred]'], {'givens': '{X_batch: x_shared, y_batch: y_shared}'}), '([], [loss_eval, pred], givens={X_batch: x_shared, y_batch:\n y_shared})\n', (4371, 4445), False, 'import theano\n'), ((4998, 5010), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5006, 5010), True, 'import numpy as np\n'), ((5144, 5155), 'time.time', 'time.time', ([], {}), '()\n', (5153, 5155), False, 'import time\n'), ((9750, 9812), 'numpy.array', 'np.array', (['[train_loss, valid_loss, kappa_loss]'], {'dtype': 'np.float'}), '([train_loss, valid_loss, kappa_loss], dtype=np.float)\n', (9758, 9812), True, 'import numpy as np\n'), ((9817, 9859), 'numpy.save', 'np.save', (['"""data/tidy/training.npy"""', 'results'], {}), "('data/tidy/training.npy', results)\n", (9824, 9859), True, 'import numpy as np\n'), ((9864, 9908), 'numpy.save', 'np.save', (['"""data/tidy/confusion.npy"""', 'conf_mat'], {}), "('data/tidy/confusion.npy', conf_mat)\n", (9871, 9908), True, 'import numpy as np\n'), ((2376, 2453), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE)'], {'dtype': 'theano.config.floatX'}), '((BATCH_SIZE, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=theano.config.floatX)\n', (2384, 2453), True, 'import numpy as np\n'), ((2526, 2579), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, 4)'], {'dtype': 'theano.config.floatX'}), '((BATCH_SIZE, 4), dtype=theano.config.floatX)\n', (2534, 2579), True, 'import numpy as np\n'), ((2657, 2694), 'numpy.float32', 'np.float32', (['LEARNING_RATE_SCHEDULE[0]'], {}), '(LEARNING_RATE_SCHEDULE[0])\n', (2667, 2694), True, 
'import numpy as np\n'), ((9398, 9409), 'time.time', 'time.time', ([], {}), '()\n', (9407, 9409), False, 'import time\n'), ((6175, 6202), 'numpy.mean', 'np.mean', (['batch_train_losses'], {}), '(batch_train_losses)\n', (6182, 6202), True, 'import numpy as np\n'), ((7026, 7053), 'numpy.mean', 'np.mean', (['batch_valid_losses'], {}), '(batch_valid_losses)\n', (7033, 7053), True, 'import numpy as np\n'), ((7071, 7098), 'numpy.array', 'np.array', (['valid_predictions'], {}), '(valid_predictions)\n', (7079, 7098), True, 'import numpy as np\n'), ((7198, 7254), 'numpy.sum', 'np.sum', (['(valid_predictions == dloader.valid_labels.values)'], {}), '(valid_predictions == dloader.valid_labels.values)\n', (7204, 7254), True, 'import numpy as np\n'), ((8741, 8798), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['dloader.valid_labels', 'valid_predictions'], {}), '(dloader.valid_labels, valid_predictions)\n', (8757, 8798), False, 'from sklearn.metrics import confusion_matrix\n'), ((8926, 8988), 'numpy.array', 'np.array', (['[train_loss, valid_loss, kappa_loss]'], {'dtype': 'np.float'}), '([train_loss, valid_loss, kappa_loss], dtype=np.float)\n', (8934, 8988), True, 'import numpy as np\n'), ((9005, 9058), 'numpy.save', 'np.save', (["('data/tidy/training_%d.npy' % epoch)", 'results'], {}), "('data/tidy/training_%d.npy' % epoch, results)\n", (9012, 9058), True, 'import numpy as np\n'), ((9075, 9130), 'numpy.save', 'np.save', (["('data/tidy/confusion_%d.npy' % epoch)", 'conf_mat'], {}), "('data/tidy/confusion_%d.npy' % epoch, conf_mat)\n", (9082, 9130), True, 'import numpy as np\n'), ((5863, 5891), 'lasagne.utils.floatX', 'lasagne.utils.floatX', (['x_next'], {}), '(x_next)\n', (5883, 5891), False, 'import lasagne\n'), ((6696, 6730), 'lasagne.utils.floatX', 'lasagne.utils.floatX', (['valid_x_next'], {}), '(valid_x_next)\n', (6716, 6730), False, 'import lasagne\n'), ((8199, 8256), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['dloader.valid_labels', 
'valid_predictions'], {}), '(dloader.valid_labels, valid_predictions)\n', (8215, 8256), False, 'from sklearn.metrics import confusion_matrix\n'), ((7599, 7610), 'time.time', 'time.time', ([], {}), '()\n', (7608, 7610), False, 'import time\n')] |
#!/usr/bin/env python
import tempfile
import numpy as np
import geometric
import geometric.molecule
Bohr = 0.52917721
def model(coords):
'''model Hamiltonian = sum_{AB} w_{AB} * (|r_A - r_B| - b_{AB})^2.
All quantiles are in atomic unit
'''
dr = coords[:,None,:] - coords
dist = np.linalg.norm(dr, axis=2)
b = np.array([[0. , 1.8, 1.8,],
[1.8, 0. , 2.8,],
[1.8, 2.8, 0. ,]])
w = np.array([[0. , 1.0, 1.0,],
[1.0, 0. , 0.5,],
[1.0, 0.5, 0. ,]])
e = (w * (dist - b)**2).sum()
grad = np.einsum('ij,ijx->ix', 2*w*(dist-b)/(dist+1e-60), dr)
grad-= np.einsum('ij,ijx->jx', 2*w*(dist-b)/(dist+1e-60), dr)
return e, grad
class CustomEngine(geometric.engine.Engine):
def __init__(self, molecule):
super(CustomEngine, self).__init__(molecule)
def calc_new(self, coords, dirname):
energy, gradient = model(coords.reshape(-1,3))
return energy, gradient.ravel()
def test_customengine():
molecule = geometric.molecule.Molecule()
molecule.elem = ['O', 'H', 'H']
molecule.xyzs = [np.array((( 0. , 0.3, 0),
( 0.9, 0.8, 0),
(-0.9, 0.5, 0),
)) # In Angstrom
]
customengine = CustomEngine(molecule)
tmpf = tempfile.mktemp()
m = geometric.optimize.run_optimizer(customengine=customengine, check=1, input=tmpf)
coords = m.xyzs[-1] / Bohr
e = model(coords)[0]
assert e < 1e-8
if __name__ == '__main__':
test_customengine()
| [
"geometric.optimize.run_optimizer",
"tempfile.mktemp",
"numpy.array",
"geometric.molecule.Molecule",
"numpy.einsum",
"numpy.linalg.norm"
] | [((302, 328), 'numpy.linalg.norm', 'np.linalg.norm', (['dr'], {'axis': '(2)'}), '(dr, axis=2)\n', (316, 328), True, 'import numpy as np\n'), ((337, 398), 'numpy.array', 'np.array', (['[[0.0, 1.8, 1.8], [1.8, 0.0, 2.8], [1.8, 2.8, 0.0]]'], {}), '([[0.0, 1.8, 1.8], [1.8, 0.0, 2.8], [1.8, 2.8, 0.0]])\n', (345, 398), True, 'import numpy as np\n'), ((446, 507), 'numpy.array', 'np.array', (['[[0.0, 1.0, 1.0], [1.0, 0.0, 0.5], [1.0, 0.5, 0.0]]'], {}), '([[0.0, 1.0, 1.0], [1.0, 0.0, 0.5], [1.0, 0.5, 0.0]])\n', (454, 507), True, 'import numpy as np\n'), ((593, 657), 'numpy.einsum', 'np.einsum', (['"""ij,ijx->ix"""', '(2 * w * (dist - b) / (dist + 1e-60))', 'dr'], {}), "('ij,ijx->ix', 2 * w * (dist - b) / (dist + 1e-60), dr)\n", (602, 657), True, 'import numpy as np\n'), ((659, 723), 'numpy.einsum', 'np.einsum', (['"""ij,ijx->jx"""', '(2 * w * (dist - b) / (dist + 1e-60))', 'dr'], {}), "('ij,ijx->jx', 2 * w * (dist - b) / (dist + 1e-60), dr)\n", (668, 723), True, 'import numpy as np\n'), ((1045, 1074), 'geometric.molecule.Molecule', 'geometric.molecule.Molecule', ([], {}), '()\n', (1072, 1074), False, 'import geometric\n'), ((1376, 1393), 'tempfile.mktemp', 'tempfile.mktemp', ([], {}), '()\n', (1391, 1393), False, 'import tempfile\n'), ((1402, 1487), 'geometric.optimize.run_optimizer', 'geometric.optimize.run_optimizer', ([], {'customengine': 'customengine', 'check': '(1)', 'input': 'tmpf'}), '(customengine=customengine, check=1, input=tmpf\n )\n', (1434, 1487), False, 'import geometric\n'), ((1132, 1188), 'numpy.array', 'np.array', (['((0.0, 0.3, 0), (0.9, 0.8, 0), (-0.9, 0.5, 0))'], {}), '(((0.0, 0.3, 0), (0.9, 0.8, 0), (-0.9, 0.5, 0)))\n', (1140, 1188), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import optuna
import sklearn.ensemble as ensemble
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
from itertools import chain
int_dtype_list = ['int8', 'int16', 'int32',
'int64', 'uint8', 'uint16', 'uint32', 'uint64']
float_dtype_list = ['float16', 'float32', 'float64']
def convert_multi_category(train_df, test_df, split=','):
df = pd.concat([train_df, test_df])
splited_values = df.apply(
pd.value_counts).index.str.split(split).to_series()
striped_values = pd.Series(
list(chain.from_iterable(splited_values.to_list()))).str.strip()
columns = sorted(set(striped_values))
column_names = list(map(lambda x: 'label_' + x, columns))
return_df = pd.DataFrame(columns=column_names)
for _, row in df.iterrows():
droped_values = list(chain.from_iterable(
pd.Series(row).dropna().str.split(split).to_list()))
if len(droped_values) == 0:
unique_values = []
else:
unique_values = pd.Series(droped_values).str.strip().values
row_df = pd.DataFrame()
for column in columns:
row_df['label_' +
column] = [1] if (column in unique_values) else [0]
return_df = return_df.append(row_df, ignore_index=True)
return_train_df = return_df[0:len(train_df)]
return_test_df = return_df[len(train_df):].reset_index(drop=True)
return return_train_df, return_test_df
def _target_data(train_df: pd.DataFrame, target_col: str) -> pd.Series:
"""Get target column and data from train data
Extended description of function.
Parameters
----------
train_df : pd.DataFrame
train data
target_col : str
target column name
Returns
-------
pd.Series
>>> import pandas as pd
>>> data = pd.DataFrame({"param": [1, 2, 3], "target": [1, 0, 1]})
>>> _target_data(data, "target")
y1:target
0 1
1 0
2 1
"""
target_series = train_df[target_col]
target_series.name = "y1:" + target_col
return target_series
def convert_series(train_series: pd.Series, test_series: pd.Series, threshold_one_hot=0.3, include_dummy_na=False):
series = pd.concat([train_series, test_series])
dtype = series.dtype
value_counts = series.value_counts()
value_counts_number = value_counts.shape[0]
rows_count = len(series)
return_df = pd.DataFrame()
if dtype in int_dtype_list:
if value_counts_number < (rows_count * threshold_one_hot):
if not include_dummy_na:
mode_value = value_counts.index[0]
series[np.isnan(series)] = mode_value
one_hot_df = pd.get_dummies(
series, prefix=series.name, dummy_na=include_dummy_na)
for one_hot_label, one_hot_content in one_hot_df.iteritems():
return_df[one_hot_label] = one_hot_content
elif dtype in float_dtype_list:
if value_counts_number < (rows_count * threshold_one_hot):
if not include_dummy_na:
mode_value = series.value_counts().index[0]
series[np.isnan(series)] = mode_value
one_hot_df = pd.get_dummies(
series, prefix=series.name, dummy_na=include_dummy_na)
for one_hot_label, one_hot_content in one_hot_df.iteritems():
return_df[one_hot_label] = one_hot_content
else:
mean = series.mean()
series[np.isnan(series)] = mean
return_df[series.name + "_float"] = series
elif (dtype == 'object') or (dtype == 'bool'):
if value_counts_number < (rows_count * threshold_one_hot):
if not include_dummy_na:
mode_value = series.value_counts().index[0]
series[pd.isnull(series)] = mode_value
one_hot_df = pd.get_dummies(
series, prefix=series.name, dummy_na=include_dummy_na)
for one_hot_label, one_hot_content in one_hot_df.iteritems():
return_df[one_hot_label] = one_hot_content
return return_df[0:len(train_series)], return_df[len(train_series):]
def _make_return_df(train_df, test_df, target_col, threshold_one_hot, multi_category):
if (multi_category is None):
multi_category_columns = []
else:
multi_category_columns = list(chain.from_iterable(multi_category))
return_train_df = pd.DataFrame()
return_test_df = pd.DataFrame()
feature_column_index = 1
for label, train_series in train_df.iteritems():
if (label == target_col):
continue
if (label in multi_category_columns):
continue
value_counts = train_series.value_counts()
value_counts_number = value_counts.shape[0]
if (value_counts_number == 1):
continue
converted_train_df, converted_test_df = convert_series(
train_series, test_df[label], threshold_one_hot)
for converted_label, converted_train_content in converted_train_df.iteritems():
label_name = "x" + str(feature_column_index) + \
":" + converted_label
return_train_df[label_name] = converted_train_content
return_test_df[label_name] = converted_test_df[converted_label]
feature_column_index += 1
if (multi_category is not None):
for multi_columns in multi_category:
converted_train_df, converted_test_df = convert_multi_category(
train_df[multi_columns], test_df[multi_columns])
for converted_label, converted_train_content in converted_train_df.iteritems():
label_name = "x" + str(feature_column_index) + \
":" + converted_label
return_train_df[label_name] = converted_train_content
return_test_df[label_name] = converted_test_df[converted_label]
feature_column_index += 1
return return_train_df, return_test_df
def _wrapper_objective(train_df, test_df, target_series, target_col, multi_category):
target_dtype = target_series.dtype
n_estimators = 10
if target_dtype in float_dtype_list:
if target_series.value_counts().shape[0] < 10:
rf = ensemble.RandomForestClassifier(n_estimators=n_estimators)
rf_type = 'classifier'
else:
rf = ensemble.RandomForestRegressor(n_estimators=n_estimators)
rf_type = 'regressor'
else:
rf = ensemble.RandomForestClassifier(n_estimators=n_estimators)
rf_type = 'classifier'
def objective(trial):
threshold_one_hot = trial.suggest_int(
'threshold_one_hot', 0, 100) * 0.01
return_train_df, return_test_df = _make_return_df(
train_df, test_df, target_col, threshold_one_hot, multi_category)
X_train, X_test, y_train, y_test = train_test_split(
return_train_df, target_series.values, test_size=0.2, random_state=0)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
if rf_type == 'classifier':
return 1.0 - metrics.accuracy_score(y_test, y_pred)
if rf_type == 'regressor':
return 1.0 - metrics.r2_score(y_test, y_pred)
return objective
def clean(train_df, test_df, target_col, threshold_one_hot=None, multi_category=None):
target_series = _target_data(train_df, target_col)
if threshold_one_hot is None:
study = optuna.create_study()
study.optimize(_wrapper_objective(
train_df, test_df, target_series, target_col, multi_category), n_trials=100, timeout=10 * 60)
return_train_df, return_test_df = _make_return_df(
train_df, test_df, target_col, study.best_params['threshold_one_hot'] * 0.01, multi_category)
else:
return_train_df, return_test_df = _make_return_df(
train_df, test_df, target_col, threshold_one_hot, multi_category)
return return_train_df, target_series, return_test_df
| [
"pandas.Series",
"pandas.isnull",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"pandas.get_dummies",
"itertools.chain.from_iterable",
"numpy.isnan",
"pandas.DataFrame",
"sklearn.metrics.r2_score",
"pandas.concat"... | [((437, 467), 'pandas.concat', 'pd.concat', (['[train_df, test_df]'], {}), '([train_df, test_df])\n', (446, 467), True, 'import pandas as pd\n'), ((788, 822), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (800, 822), True, 'import pandas as pd\n'), ((2299, 2337), 'pandas.concat', 'pd.concat', (['[train_series, test_series]'], {}), '([train_series, test_series])\n', (2308, 2337), True, 'import pandas as pd\n'), ((2497, 2511), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2509, 2511), True, 'import pandas as pd\n'), ((4498, 4512), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4510, 4512), True, 'import pandas as pd\n'), ((4534, 4548), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4546, 4548), True, 'import pandas as pd\n'), ((1142, 1156), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1154, 1156), True, 'import pandas as pd\n'), ((6574, 6632), 'sklearn.ensemble.RandomForestClassifier', 'ensemble.RandomForestClassifier', ([], {'n_estimators': 'n_estimators'}), '(n_estimators=n_estimators)\n', (6605, 6632), True, 'import sklearn.ensemble as ensemble\n'), ((6966, 7056), 'sklearn.model_selection.train_test_split', 'train_test_split', (['return_train_df', 'target_series.values'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(return_train_df, target_series.values, test_size=0.2,\n random_state=0)\n', (6982, 7056), False, 'from sklearn.model_selection import train_test_split\n'), ((7544, 7565), 'optuna.create_study', 'optuna.create_study', ([], {}), '()\n', (7563, 7565), False, 'import optuna\n'), ((2779, 2848), 'pandas.get_dummies', 'pd.get_dummies', (['series'], {'prefix': 'series.name', 'dummy_na': 'include_dummy_na'}), '(series, prefix=series.name, dummy_na=include_dummy_na)\n', (2793, 2848), True, 'import pandas as pd\n'), ((4438, 4473), 'itertools.chain.from_iterable', 'chain.from_iterable', (['multi_category'], {}), '(multi_category)\n', (4457, 4473), 
False, 'from itertools import chain\n'), ((6334, 6392), 'sklearn.ensemble.RandomForestClassifier', 'ensemble.RandomForestClassifier', ([], {'n_estimators': 'n_estimators'}), '(n_estimators=n_estimators)\n', (6365, 6392), True, 'import sklearn.ensemble as ensemble\n'), ((6459, 6516), 'sklearn.ensemble.RandomForestRegressor', 'ensemble.RandomForestRegressor', ([], {'n_estimators': 'n_estimators'}), '(n_estimators=n_estimators)\n', (6489, 6516), True, 'import sklearn.ensemble as ensemble\n'), ((3278, 3347), 'pandas.get_dummies', 'pd.get_dummies', (['series'], {'prefix': 'series.name', 'dummy_na': 'include_dummy_na'}), '(series, prefix=series.name, dummy_na=include_dummy_na)\n', (3292, 3347), True, 'import pandas as pd\n'), ((7196, 7234), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7218, 7234), True, 'import sklearn.metrics as metrics\n'), ((7295, 7327), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7311, 7327), True, 'import sklearn.metrics as metrics\n'), ((2723, 2739), 'numpy.isnan', 'np.isnan', (['series'], {}), '(series)\n', (2731, 2739), True, 'import numpy as np\n'), ((3564, 3580), 'numpy.isnan', 'np.isnan', (['series'], {}), '(series)\n', (3572, 3580), True, 'import numpy as np\n'), ((3939, 4008), 'pandas.get_dummies', 'pd.get_dummies', (['series'], {'prefix': 'series.name', 'dummy_na': 'include_dummy_na'}), '(series, prefix=series.name, dummy_na=include_dummy_na)\n', (3953, 4008), True, 'import pandas as pd\n'), ((3222, 3238), 'numpy.isnan', 'np.isnan', (['series'], {}), '(series)\n', (3230, 3238), True, 'import numpy as np\n'), ((1081, 1105), 'pandas.Series', 'pd.Series', (['droped_values'], {}), '(droped_values)\n', (1090, 1105), True, 'import pandas as pd\n'), ((3882, 3899), 'pandas.isnull', 'pd.isnull', (['series'], {}), '(series)\n', (3891, 3899), True, 'import pandas as pd\n'), ((919, 933), 'pandas.Series', 'pd.Series', (['row'], {}), 
'(row)\n', (928, 933), True, 'import pandas as pd\n')] |
r"""Create data for kernel tests. Kernel tests are just securing status quo."""
import numpy as np
from copy import deepcopy
from scipy.constants import mu_0, epsilon_0
from empymod import kernel, filters
# All possible (ab, msrc, mrec) combinations
pab = (np.arange(1, 7)[None, :] + np.array([10, 20, 30])[:, None]).ravel()
iab = {}
for mrec in [False, True]:
for ab in pab:
if ab == 36:
continue
if ab % 10 > 3:
msrc = True
else:
msrc = False
if mrec:
msrc = not msrc
iab[ab] = (msrc, mrec)
# # A -- ANGLE # #
angres = []
angle = np.array([1., 2., 4., 5.])
for key, val in iab.items():
inp = {'angle': angle, 'ab': key, 'msrc': val[0], 'mrec': val[1]}
res = kernel.angle_factor(angle, key, val[0], val[1])
angres.append({'inp': inp, 'res': res})
# # B -- WAVENUMBER # #
# Example: 6-layer model; source in second layer, receiver in last
freq = np.array([0.003, 2.5, 1e6])
res = np.array([3, .3, 10, 4, 3, 1])
aniso = np.array([1, .5, 3, 1, 2, 1])
epermH = np.array([80, 100, 3, 8, 1, 1])
epermV = np.array([100, 30, 1, 10, 68, 9])
mpermH = np.array([.5, 100, 30, 1, 30, 1])
mpermV = np.array([2, 1, 30, 9, 50, 1])
etaH = 1/res + np.outer(2j*np.pi*freq, epermH*epsilon_0)
etaV = 1/(res*aniso*aniso) + np.outer(2j*np.pi*freq, epermV*epsilon_0)
zetaH = np.outer(2j*np.pi*freq, mpermH*mu_0)
zetaV = np.outer(2j*np.pi*freq, mpermV*mu_0)
lambd = filters.key_51_2012().base/np.array([0.001, 1, 100, 10000])[:, None]
depth = np.array([-np.infty, 0, 150, 300, 500, 600])
inp1 = {'zsrc': np.array([100]),
'zrec': np.array([650]),
'lsrc': np.array(1),
'lrec': np.array(5),
'depth': depth,
'etaH': etaH,
'etaV': etaV,
'zetaH': zetaH,
'zetaV': zetaV,
'lambd': lambd,
'xdirect': False,
'use_ne_eval': False}
wave = {}
for key, val in iab.items():
res = kernel.wavenumber(ab=key, msrc=val[0], mrec=val[1], **inp1)
wave[key] = (key, val[0], val[1], inp1, res)
# # C -- GREENFCT # #
# Standard example
inp2 = deepcopy(inp1)
# Source and receiver in same layer (last)
inp3 = deepcopy(inp1)
inp3['zsrc'] = np.array([610])
inp3['lsrc'] = np.array(5)
# Receiver in first layer
inp4 = deepcopy(inp1)
inp4['zrec'] = np.array([-30])
inp4['lrec'] = np.array(0)
green = {}
for key, val in iab.items():
res1 = kernel.greenfct(ab=key, msrc=val[0], mrec=val[1], **inp2)
res2 = kernel.greenfct(ab=key, msrc=val[0], mrec=val[1], **inp3)
res3 = kernel.greenfct(ab=key, msrc=val[0], mrec=val[1], **inp4)
green[key] = (key, val[0], val[1], inp2, res1, inp3, res2, inp4, res3)
# # D -- REFLECTIONS # #
refl = {}
# Standard example
Gam = np.sqrt((etaH/etaV)[:, None, :, None] *
(lambd**2)[None, :, None, :] + (zetaH**2)[:, None, :, None])
inp5 = {'depth': depth,
'e_zH': etaH,
'Gam': Gam,
'lrec': inp1['lrec'],
'lsrc': inp1['lsrc'],
'use_ne_eval': False}
Rp1, Rm1 = kernel.reflections(**inp5)
refl[0] = (inp5, Rp1, Rm1)
# Source and receiver in same layer, but not last
inp6 = {'depth': inp2['depth'],
'e_zH': etaH,
'Gam': Gam,
'lrec': np.array(3),
'lsrc': np.array(3),
'use_ne_eval': False}
Rp2, Rm2 = kernel.reflections(**inp6)
refl[1] = (inp6, Rp2, Rm2)
# # E -- FIELDS # #
# Standard example
inp7 = {'depth': depth,
'Rp': Rp1,
'Rm': Rm1,
'Gam': Gam,
'lrec': inp5['lrec'],
'lsrc': inp5['lsrc'],
'zsrc': inp1['zsrc'],
'use_ne_eval': False}
# Source and receiver in same layer, but not last
inp8 = {'depth': depth,
'Rp': Rp2,
'Rm': Rm2,
'Gam': Gam,
'lrec': inp6['lrec'],
'lsrc': inp6['lsrc'],
'zsrc': np.array([350]),
'use_ne_eval': False}
# Source and receiver in same layer, but not last
Rp4, Rm4 = kernel.reflections(depth, etaH, Gam, np.array(5),
np.array(5), False)
inp10 = {'depth': depth,
'Rp': Rp4,
'Rm': Rm4,
'Gam': Gam,
'lrec': np.array(5),
'lsrc': np.array(5),
'zsrc': np.array([700]),
'use_ne_eval': False}
# Receiver in first layer, source in last
Rp3, Rm3 = kernel.reflections(depth, etaH, Gam, np.array(0),
np.array(5), False)
inp9 = {'depth': depth,
'Rp': Rp3,
'Rm': Rm3,
'Gam': Gam,
'lrec': np.array(0),
'lsrc': np.array(5),
'zsrc': np.array([700]),
'use_ne_eval': False}
# Source in first layer, receiver in last
Rp5, Rm5 = kernel.reflections(depth, etaH, Gam, np.array(5),
np.array(0), False)
inp11 = {'depth': depth,
'Rp': Rp5,
'Rm': Rm5,
'Gam': Gam,
'lrec': np.array(5),
'lsrc': np.array(0),
'zsrc': np.array([-30]),
'use_ne_eval': False}
fields = {}
for TM in [False, True]:
for ab in pab:
if TM and ab in [16, 26]:
continue
elif not TM and ab in [13, 23, 31, 32, 33, 34, 35]:
continue
elif ab == 36:
continue
out1 = kernel.fields(ab=ab, TM=TM, **inp7)
out2 = kernel.fields(ab=ab, TM=TM, **inp8)
out3 = kernel.fields(ab=ab, TM=TM, **inp9)
out4 = kernel.fields(ab=ab, TM=TM, **inp10)
out5 = kernel.fields(ab=ab, TM=TM, **inp11)
fields[ab] = (ab, TM, inp7, out1, inp8, out2, inp9, out3, inp10, out4,
inp11, out5)
# # F -- Store data # #
np.savez_compressed('../data/kernel.npz', angres=angres, wave=wave,
green=green, refl=refl, fields=fields)
| [
"numpy.sqrt",
"empymod.kernel.angle_factor",
"numpy.array",
"numpy.outer",
"empymod.kernel.greenfct",
"empymod.filters.key_51_2012",
"copy.deepcopy",
"empymod.kernel.wavenumber",
"numpy.savez_compressed",
"empymod.kernel.fields",
"empymod.kernel.reflections",
"numpy.arange"
] | [((627, 657), 'numpy.array', 'np.array', (['[1.0, 2.0, 4.0, 5.0]'], {}), '([1.0, 2.0, 4.0, 5.0])\n', (635, 657), True, 'import numpy as np\n'), ((955, 988), 'numpy.array', 'np.array', (['[0.003, 2.5, 1000000.0]'], {}), '([0.003, 2.5, 1000000.0])\n', (963, 988), True, 'import numpy as np\n'), ((989, 1020), 'numpy.array', 'np.array', (['[3, 0.3, 10, 4, 3, 1]'], {}), '([3, 0.3, 10, 4, 3, 1])\n', (997, 1020), True, 'import numpy as np\n'), ((1028, 1058), 'numpy.array', 'np.array', (['[1, 0.5, 3, 1, 2, 1]'], {}), '([1, 0.5, 3, 1, 2, 1])\n', (1036, 1058), True, 'import numpy as np\n'), ((1067, 1098), 'numpy.array', 'np.array', (['[80, 100, 3, 8, 1, 1]'], {}), '([80, 100, 3, 8, 1, 1])\n', (1075, 1098), True, 'import numpy as np\n'), ((1108, 1141), 'numpy.array', 'np.array', (['[100, 30, 1, 10, 68, 9]'], {}), '([100, 30, 1, 10, 68, 9])\n', (1116, 1141), True, 'import numpy as np\n'), ((1151, 1185), 'numpy.array', 'np.array', (['[0.5, 100, 30, 1, 30, 1]'], {}), '([0.5, 100, 30, 1, 30, 1])\n', (1159, 1185), True, 'import numpy as np\n'), ((1194, 1224), 'numpy.array', 'np.array', (['[2, 1, 30, 9, 50, 1]'], {}), '([2, 1, 30, 9, 50, 1])\n', (1202, 1224), True, 'import numpy as np\n'), ((1361, 1405), 'numpy.outer', 'np.outer', (['(2.0j * np.pi * freq)', '(mpermH * mu_0)'], {}), '(2.0j * np.pi * freq, mpermH * mu_0)\n', (1369, 1405), True, 'import numpy as np\n'), ((1406, 1450), 'numpy.outer', 'np.outer', (['(2.0j * np.pi * freq)', '(mpermV * mu_0)'], {}), '(2.0j * np.pi * freq, mpermV * mu_0)\n', (1414, 1450), True, 'import numpy as np\n'), ((1528, 1572), 'numpy.array', 'np.array', (['[-np.infty, 0, 150, 300, 500, 600]'], {}), '([-np.infty, 0, 150, 300, 500, 600])\n', (1536, 1572), True, 'import numpy as np\n'), ((2101, 2115), 'copy.deepcopy', 'deepcopy', (['inp1'], {}), '(inp1)\n', (2109, 2115), False, 'from copy import deepcopy\n'), ((2166, 2180), 'copy.deepcopy', 'deepcopy', (['inp1'], {}), '(inp1)\n', (2174, 2180), False, 'from copy import deepcopy\n'), ((2196, 2211), 
'numpy.array', 'np.array', (['[610]'], {}), '([610])\n', (2204, 2211), True, 'import numpy as np\n'), ((2227, 2238), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (2235, 2238), True, 'import numpy as np\n'), ((2272, 2286), 'copy.deepcopy', 'deepcopy', (['inp1'], {}), '(inp1)\n', (2280, 2286), False, 'from copy import deepcopy\n'), ((2302, 2317), 'numpy.array', 'np.array', (['[-30]'], {}), '([-30])\n', (2310, 2317), True, 'import numpy as np\n'), ((2333, 2344), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (2341, 2344), True, 'import numpy as np\n'), ((2730, 2840), 'numpy.sqrt', 'np.sqrt', (['((etaH / etaV)[:, None, :, None] * (lambd ** 2)[None, :, None, :] + (zetaH **\n 2)[:, None, :, None])'], {}), '((etaH / etaV)[:, None, :, None] * (lambd ** 2)[None, :, None, :] +\n (zetaH ** 2)[:, None, :, None])\n', (2737, 2840), True, 'import numpy as np\n'), ((3012, 3038), 'empymod.kernel.reflections', 'kernel.reflections', ([], {}), '(**inp5)\n', (3030, 3038), False, 'from empymod import kernel, filters\n'), ((3289, 3315), 'empymod.kernel.reflections', 'kernel.reflections', ([], {}), '(**inp6)\n', (3307, 3315), False, 'from empymod import kernel, filters\n'), ((5570, 5681), 'numpy.savez_compressed', 'np.savez_compressed', (['"""../data/kernel.npz"""'], {'angres': 'angres', 'wave': 'wave', 'green': 'green', 'refl': 'refl', 'fields': 'fields'}), "('../data/kernel.npz', angres=angres, wave=wave, green=\n green, refl=refl, fields=fields)\n", (5589, 5681), True, 'import numpy as np\n'), ((763, 810), 'empymod.kernel.angle_factor', 'kernel.angle_factor', (['angle', 'key', 'val[0]', 'val[1]'], {}), '(angle, key, val[0], val[1])\n', (782, 810), False, 'from empymod import kernel, filters\n'), ((1240, 1289), 'numpy.outer', 'np.outer', (['(2.0j * np.pi * freq)', '(epermH * epsilon_0)'], {}), '(2.0j * np.pi * freq, epermH * epsilon_0)\n', (1248, 1289), True, 'import numpy as np\n'), ((1311, 1360), 'numpy.outer', 'np.outer', (['(2.0j * np.pi * freq)', '(epermV * epsilon_0)'], 
{}), '(2.0j * np.pi * freq, epermV * epsilon_0)\n', (1319, 1360), True, 'import numpy as np\n'), ((1589, 1604), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (1597, 1604), True, 'import numpy as np\n'), ((1622, 1637), 'numpy.array', 'np.array', (['[650]'], {}), '([650])\n', (1630, 1637), True, 'import numpy as np\n'), ((1655, 1666), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1663, 1666), True, 'import numpy as np\n'), ((1684, 1695), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (1692, 1695), True, 'import numpy as np\n'), ((1942, 2001), 'empymod.kernel.wavenumber', 'kernel.wavenumber', ([], {'ab': 'key', 'msrc': 'val[0]', 'mrec': 'val[1]'}), '(ab=key, msrc=val[0], mrec=val[1], **inp1)\n', (1959, 2001), False, 'from empymod import kernel, filters\n'), ((2396, 2453), 'empymod.kernel.greenfct', 'kernel.greenfct', ([], {'ab': 'key', 'msrc': 'val[0]', 'mrec': 'val[1]'}), '(ab=key, msrc=val[0], mrec=val[1], **inp2)\n', (2411, 2453), False, 'from empymod import kernel, filters\n'), ((2465, 2522), 'empymod.kernel.greenfct', 'kernel.greenfct', ([], {'ab': 'key', 'msrc': 'val[0]', 'mrec': 'val[1]'}), '(ab=key, msrc=val[0], mrec=val[1], **inp3)\n', (2480, 2522), False, 'from empymod import kernel, filters\n'), ((2534, 2591), 'empymod.kernel.greenfct', 'kernel.greenfct', ([], {'ab': 'key', 'msrc': 'val[0]', 'mrec': 'val[1]'}), '(ab=key, msrc=val[0], mrec=val[1], **inp4)\n', (2549, 2591), False, 'from empymod import kernel, filters\n'), ((3206, 3217), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (3214, 3217), True, 'import numpy as np\n'), ((3235, 3246), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (3243, 3246), True, 'import numpy as np\n'), ((3793, 3808), 'numpy.array', 'np.array', (['[350]'], {}), '([350])\n', (3801, 3808), True, 'import numpy as np\n'), ((3939, 3950), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (3947, 3950), True, 'import numpy as np\n'), ((3982, 3993), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (3990, 
3993), True, 'import numpy as np\n'), ((4105, 4116), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (4113, 4116), True, 'import numpy as np\n'), ((4135, 4146), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (4143, 4146), True, 'import numpy as np\n'), ((4165, 4180), 'numpy.array', 'np.array', (['[700]'], {}), '([700])\n', (4173, 4180), True, 'import numpy as np\n'), ((4304, 4315), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (4312, 4315), True, 'import numpy as np\n'), ((4347, 4358), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (4355, 4358), True, 'import numpy as np\n'), ((4465, 4476), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (4473, 4476), True, 'import numpy as np\n'), ((4494, 4505), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (4502, 4505), True, 'import numpy as np\n'), ((4523, 4538), 'numpy.array', 'np.array', (['[700]'], {}), '([700])\n', (4531, 4538), True, 'import numpy as np\n'), ((4661, 4672), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (4669, 4672), True, 'import numpy as np\n'), ((4704, 4715), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (4712, 4715), True, 'import numpy as np\n'), ((4827, 4838), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (4835, 4838), True, 'import numpy as np\n'), ((4857, 4868), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (4865, 4868), True, 'import numpy as np\n'), ((4887, 4902), 'numpy.array', 'np.array', (['[-30]'], {}), '([-30])\n', (4895, 4902), True, 'import numpy as np\n'), ((1451, 1472), 'empymod.filters.key_51_2012', 'filters.key_51_2012', ([], {}), '()\n', (1470, 1472), False, 'from empymod import kernel, filters\n'), ((1478, 1510), 'numpy.array', 'np.array', (['[0.001, 1, 100, 10000]'], {}), '([0.001, 1, 100, 10000])\n', (1486, 1510), True, 'import numpy as np\n'), ((5189, 5224), 'empymod.kernel.fields', 'kernel.fields', ([], {'ab': 'ab', 'TM': 'TM'}), '(ab=ab, TM=TM, **inp7)\n', (5202, 5224), False, 'from empymod import kernel, filters\n'), ((5240, 5275), 
'empymod.kernel.fields', 'kernel.fields', ([], {'ab': 'ab', 'TM': 'TM'}), '(ab=ab, TM=TM, **inp8)\n', (5253, 5275), False, 'from empymod import kernel, filters\n'), ((5291, 5326), 'empymod.kernel.fields', 'kernel.fields', ([], {'ab': 'ab', 'TM': 'TM'}), '(ab=ab, TM=TM, **inp9)\n', (5304, 5326), False, 'from empymod import kernel, filters\n'), ((5342, 5378), 'empymod.kernel.fields', 'kernel.fields', ([], {'ab': 'ab', 'TM': 'TM'}), '(ab=ab, TM=TM, **inp10)\n', (5355, 5378), False, 'from empymod import kernel, filters\n'), ((5394, 5430), 'empymod.kernel.fields', 'kernel.fields', ([], {'ab': 'ab', 'TM': 'TM'}), '(ab=ab, TM=TM, **inp11)\n', (5407, 5430), False, 'from empymod import kernel, filters\n'), ((258, 273), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (267, 273), True, 'import numpy as np\n'), ((285, 307), 'numpy.array', 'np.array', (['[10, 20, 30]'], {}), '([10, 20, 30])\n', (293, 307), True, 'import numpy as np\n')] |
# External imports
import numpy as np
import os
import argparse
import importlib
# Local imports
from .helpers import data as avdata
from .helpers import util as util
from .helpers import combine as combine
def train(args):
    """Grid-search similarity-combination strategies on the training set.

    Computes (or reuses) the per-problem similarity scores produced by
    ``args.NETWORK`` on ``args.TRAINSET``, evaluates several combination
    strategies (min, max, uniform, exponential weighting, majority vote),
    writes one CSV per strategy under the output directory, and finally
    writes ``summary.csv`` listing, for each false-accept-rate threshold,
    the most accurate strategy whose FAR stays below that threshold.

    Parameters
    ----------
    args : argparse.Namespace
        Expects attributes datarepo, NETWORK, epoch, recompute, TRAINSET.
    """
    repo = args.datarepo
    network = args.NETWORK
    epoch = args.epoch
    rc = args.recompute
    trainset = args.TRAINSET
    simset = network+'-'+trainset
    simfile = util.get_sim_path(repo)+simset+'.csv'
    # Recompute the raw similarity file if it is missing or a recompute
    # was explicitly requested.
    if not os.path.isfile(simfile) or rc:
        print("Computing similarities...")
        util.compute_similarities(repo, trainset, network, epoch)
    outdir = util.get_output_path(repo, simset)
    print("Loading network output ("+simset+")")
    problems = avdata.load_similarities(repo, network, trainset)
    # Each collected row is [method, delta, T, F, PT, PF, TP, FP, TN, FN,
    # accuracy, FAR] (see the unpacking in the summary loop below).
    results = []
    print("Evaluating min")
    res = util.eval_combine(combine.cmin, problems, outdir+'min.csv')
    for r in res:
        results.append(['min']+r)
    print("Evaluating max")
    res = util.eval_combine(combine.cmax, problems, outdir+'max.csv')
    for r in res:
        results.append(['max']+r)
    print("Evaluating uniform")
    res = util.eval_combine(combine.uniform, problems, outdir+'uniform.csv')
    for r in res:
        results.append(['uniform']+r)
    print("Evaluating exp")
    # Sweep the two exponential-weighting parameters on a small grid.
    for lt in np.arange(0.00, 0.21, 0.01):
        for ll in np.arange(0.00, 0.11, 0.01):
            mname = 'exp-{0:.2f}-{1:.2f}'.format(lt,ll)
            # NOTE(review): the lambda closes over lt/ll (late binding);
            # this is fine only if eval_combine consumes it immediately
            # within this iteration -- confirm.
            res = util.eval_combine(lambda seq: combine.exponential(seq, lt, ll),
                    problems, outdir+mname+'.csv')
            for r in res:
                results.append([mname]+r)
    print("Evaluating majority")
    res = util.eval_combine(combine.majority, problems, outdir+'majority.csv')
    for r in res:
        results.append(['majority']+r)
    with open(outdir+'summary.csv', 'w') as f:
        f.write('Threshold;Method;Delta;True;False;PredTrue;PredFalse;TP;FP;TN;FN;Accuracy;FAR\n')
        prev = None
        # For every FAR threshold, pick the most accurate result whose
        # FAR is below it; only print to stdout when the winner changes.
        for threshold in np.arange(0.05,1.01,0.05):
            bestacc = 0.0
            best = None
            for r in results:
                [method, delta, T, F, PT, PF, TP, FP, TN, FN, acc, far] = r
                if far < threshold:
                    if acc > bestacc:
                        bestacc = acc
                        best = r
            if best is not None:
                f.write("{0:.2f}".format(threshold)+';'+best[0]+';'+util.pp(best[1:]))
                if best != prev:
                    print('#### Best result for threshold = {0:.2f}'.format(threshold))
                    print("Strategy: "+str(best[0]))
                    print(util.pp(best[1:], False))
                    prev = best
def test(args):
    """Evaluate one chosen combination strategy on the test set.

    When ``args.delta`` is None, sweeps the decision threshold to produce a
    ROC-curve CSV; otherwise runs a single evaluation at that delta, prints
    the result and writes it to ``test-result.csv``.

    Parameters
    ----------
    args : argparse.Namespace
        Expects attributes datarepo, NETWORK, epoch, recompute, lamb,
        COMBINE, delta, TESTSET.
    """
    repo = args.datarepo
    network = args.NETWORK
    epoch = args.epoch
    rc = args.recompute
    lamb = args.lamb
    func = args.COMBINE
    delta = args.delta
    trainset = args.TESTSET
    simset = network+'-'+trainset
    simfile = util.get_sim_path(repo)+simset+'.csv'
    # Recompute the raw similarity file if missing or explicitly requested.
    if not os.path.isfile(simfile) or rc:
        print("Computing similarities...")
        util.compute_similarities(repo, trainset, network, epoch)
    outdir = util.get_output_path(repo, simset)
    print("Loading network output ("+simset+")")
    problems = avdata.load_similarities(repo, network, trainset)
    # Resolve the combination callable; lamb only matters for 'exp'.
    fun = combine.get_fun(func, lamb)
    if delta is None:
        # ROC curve
        print("Generating ROC curve for "+str(func)
              +(", lambda = "+str(lamb) if func=='exp' else ''))
        util.eval_combine(fun, problems, outdir+'test-result-roc.csv')
    else:
        print("Testing with "+str(func)
              +(", lambda = "+str(lamb) if func=='exp' else '')+", delta = "+str(delta))
        r = util.run_combine(fun, problems, delta)
        print(util.pp(r, line=False))
        methodstr = func + ("{0:.2f}".format(lamb) if func=='exp' else '')
        with open(outdir+'test-result.csv', 'w') as f:
            f.write('Method;Delta;True;False;PredTrue;PredFalse;TP;FP;TN;FN;Accuracy;FAR\n')
            f.write(methodstr+';'+util.pp(r))
| [
"os.path.isfile",
"numpy.arange"
] | [((1311, 1337), 'numpy.arange', 'np.arange', (['(0.0)', '(0.21)', '(0.01)'], {}), '(0.0, 0.21, 0.01)\n', (1320, 1337), True, 'import numpy as np\n'), ((1358, 1384), 'numpy.arange', 'np.arange', (['(0.0)', '(0.11)', '(0.01)'], {}), '(0.0, 0.11, 0.01)\n', (1367, 1384), True, 'import numpy as np\n'), ((2006, 2033), 'numpy.arange', 'np.arange', (['(0.05)', '(1.01)', '(0.05)'], {}), '(0.05, 1.01, 0.05)\n', (2015, 2033), True, 'import numpy as np\n'), ((461, 484), 'os.path.isfile', 'os.path.isfile', (['simfile'], {}), '(simfile)\n', (475, 484), False, 'import os\n'), ((3031, 3054), 'os.path.isfile', 'os.path.isfile', (['simfile'], {}), '(simfile)\n', (3045, 3054), False, 'import os\n')] |
import glob
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.sdist import sdist as _sdist
from setuptools.extension import Extension
from setupext import check_for_openmp
def get_version(filename):
    """Extract the ``__version__`` string defined in *filename*.

    Scans the file for the first line of the form
    ``__version__ = "x.y.z"`` and returns the quoted value with the
    surrounding quote characters stripped.

    Raises RuntimeError when no such line exists.
    (Inspired by https://github.mabuchilab/QNET/.)
    """
    with open(filename) as fh:
        lines = fh.readlines()
    for line in lines:
        if line.startswith("__version__"):
            rhs = line.split("=")[1].strip()
            return rhs[1:-1]
    raise RuntimeError("Could not get version from %s." % filename)
# Version string scraped from the package's own __init__.py.
VERSION = get_version("yt_astro_analysis/__init__.py")
# Remove a stale MANIFEST so setuptools regenerates it from MANIFEST.in.
if os.path.exists("MANIFEST"):
    os.remove("MANIFEST")
with open("README.md") as file:
    long_description = file.read()
# NOTE(review): omp_args is not passed to any Extension in this part of
# the file -- verify whether it is used further down or is dead.
if check_for_openmp() is True:
    omp_args = ["-fopenmp"]
else:
    omp_args = None
# Link against libm on every platform except Windows.
if os.name == "nt":
    std_libs = []
else:
    std_libs = ["m"]
# Cython extension modules (compiled from .pyx sources by build_ext below).
cython_extensions = [
    Extension(
        "yt_astro_analysis.ppv_cube.ppv_utils",
        ["yt_astro_analysis/ppv_cube/ppv_utils.pyx"],
        libraries=std_libs,
    ),
]
# Plain C extension modules for the FOF and HOP halo finders.
extensions = [
    Extension(
        "yt_astro_analysis.halo_analysis.halo_finding.fof.EnzoFOF",
        [
            "yt_astro_analysis/halo_analysis/halo_finding/fof/EnzoFOF.c",
            "yt_astro_analysis/halo_analysis/halo_finding/fof/kd.c",
        ],
        libraries=std_libs,
    ),
    Extension(
        "yt_astro_analysis.halo_analysis.halo_finding.hop.EnzoHop",
        glob.glob("yt_astro_analysis/halo_analysis/halo_finding/hop/*.c"),
    ),
]
# Extra packages needed only for development, testing and docs
# (installed via the 'dev' extra declared in setup() below).
dev_requirements = [
    "astropy",
    "codecov",
    "flake8",
    "girder-client",
    "gitpython",
    "nose",
    "nose-timer",
    "pytest",
    "scipy",
    "sphinx",
    "sphinx_bootstrap_theme",
    "twine",
    "wheel",
]
# ROCKSTAR
# Optionally build the Rockstar halo-finder bindings: if a rockstar.cfg
# file exists, it must contain the base directory of a rockstar-galaxies
# checkout/install to link against.
if os.path.exists("rockstar.cfg"):
    try:
        rd = open("rockstar.cfg").read().strip()
    except OSError:
        print("Reading Rockstar location from rockstar.cfg failed.")
        print("Please place the base directory of your")
        print("rockstar-galaxies install in rockstar.cfg and restart.")
        print("(ex: \"echo '/path/to/rockstar-galaxies' > rockstar.cfg\" )")
        sys.exit(1)
    rockstar_extdir = "yt_astro_analysis/halo_analysis/halo_finding/rockstar"
    rockstar_extensions = [
        Extension(
            "yt_astro_analysis.halo_analysis.halo_finding.rockstar.rockstar_interface",
            sources=[os.path.join(rockstar_extdir, "rockstar_interface.pyx")],
        ),
        Extension(
            "yt_astro_analysis.halo_analysis.halo_finding.rockstar.rockstar_groupies",
            sources=[os.path.join(rockstar_extdir, "rockstar_groupies.pyx")],
        ),
    ]
    # Point every rockstar extension at the library and headers found in rd.
    for ext in rockstar_extensions:
        ext.library_dirs.append(rd)
        ext.libraries.append("rockstar-galaxies")
        ext.define_macros.append(("THREADSAFE", ""))
        ext.include_dirs += [rd, os.path.join(rd, "io"), os.path.join(rd, "util")]
    extensions += rockstar_extensions
class build_ext(_build_ext):
    # subclass setuptools extension builder to avoid importing cython and numpy
    # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
    def finalize_options(self):
        # Cythonize the .pyx sources lazily, only once a build actually runs.
        from Cython.Build import cythonize
        self.distribution.ext_modules[:] = cythonize(self.distribution.ext_modules)
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process
        # see http://stackoverflow.com/a/21621493/1382869
        if isinstance(__builtins__, dict):
            # sometimes this is a dict so we need to check for that
            # https://docs.python.org/3/library/builtins.html
            __builtins__["__NUMPY_SETUP__"] = False
        else:
            __builtins__.__NUMPY_SETUP__ = False
        import numpy
        # Make numpy's C headers visible to the compiler.
        self.include_dirs.append(numpy.get_include())
class sdist(_sdist):
    # subclass setuptools source distribution builder to ensure cython
    # generated C files are included in source distribution.
    # See http://stackoverflow.com/a/18418524/1382869
    def run(self):
        # Make sure the compiled Cython files in the distribution are up-to-date
        # before the standard sdist machinery collects the files.
        from Cython.Build import cythonize
        cythonize(cython_extensions)
        _sdist.run(self)
# Package metadata/build configuration; the custom sdist/build_ext classes
# defined above handle cythonization.
setup(
    long_description=long_description,
    cmdclass={"sdist": sdist, "build_ext": build_ext},
    ext_modules=cython_extensions + extensions,
    extras_require={
        "dev": dev_requirements,
    },
    packages=find_packages(),
)
| [
"os.path.exists",
"setupext.check_for_openmp",
"Cython.Build.cythonize",
"setuptools.find_packages",
"setuptools.extension.Extension",
"setuptools.command.sdist.sdist.run",
"os.path.join",
"numpy.get_include",
"sys.exit",
"setuptools.command.build_ext.build_ext.finalize_options",
"glob.glob",
... | [((699, 725), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (713, 725), False, 'import os\n'), ((1863, 1893), 'os.path.exists', 'os.path.exists', (['"""rockstar.cfg"""'], {}), "('rockstar.cfg')\n", (1877, 1893), False, 'import os\n'), ((731, 752), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (740, 752), False, 'import os\n'), ((825, 843), 'setupext.check_for_openmp', 'check_for_openmp', ([], {}), '()\n', (841, 843), False, 'from setupext import check_for_openmp\n'), ((1000, 1120), 'setuptools.extension.Extension', 'Extension', (['"""yt_astro_analysis.ppv_cube.ppv_utils"""', "['yt_astro_analysis/ppv_cube/ppv_utils.pyx']"], {'libraries': 'std_libs'}), "('yt_astro_analysis.ppv_cube.ppv_utils', [\n 'yt_astro_analysis/ppv_cube/ppv_utils.pyx'], libraries=std_libs)\n", (1009, 1120), False, 'from setuptools.extension import Extension\n'), ((1170, 1394), 'setuptools.extension.Extension', 'Extension', (['"""yt_astro_analysis.halo_analysis.halo_finding.fof.EnzoFOF"""', "['yt_astro_analysis/halo_analysis/halo_finding/fof/EnzoFOF.c',\n 'yt_astro_analysis/halo_analysis/halo_finding/fof/kd.c']"], {'libraries': 'std_libs'}), "('yt_astro_analysis.halo_analysis.halo_finding.fof.EnzoFOF', [\n 'yt_astro_analysis/halo_analysis/halo_finding/fof/EnzoFOF.c',\n 'yt_astro_analysis/halo_analysis/halo_finding/fof/kd.c'], libraries=\n std_libs)\n", (1179, 1394), False, 'from setuptools.extension import Extension\n'), ((1539, 1604), 'glob.glob', 'glob.glob', (['"""yt_astro_analysis/halo_analysis/halo_finding/hop/*.c"""'], {}), "('yt_astro_analysis/halo_analysis/halo_finding/hop/*.c')\n", (1548, 1604), False, 'import glob\n'), ((3379, 3419), 'Cython.Build.cythonize', 'cythonize', (['self.distribution.ext_modules'], {}), '(self.distribution.ext_modules)\n', (3388, 3419), False, 'from Cython.Build import cythonize\n'), ((3428, 3461), 'setuptools.command.build_ext.build_ext.finalize_options', '_build_ext.finalize_options', (['self'], 
{}), '(self)\n', (3455, 3461), True, 'from setuptools.command.build_ext import build_ext as _build_ext\n'), ((4316, 4344), 'Cython.Build.cythonize', 'cythonize', (['cython_extensions'], {}), '(cython_extensions)\n', (4325, 4344), False, 'from Cython.Build import cythonize\n'), ((4353, 4369), 'setuptools.command.sdist.sdist.run', '_sdist.run', (['self'], {}), '(self)\n', (4363, 4369), True, 'from setuptools.command.sdist import sdist as _sdist\n'), ((4595, 4610), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (4608, 4610), False, 'from setuptools import find_packages, setup\n'), ((2256, 2267), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2264, 2267), False, 'import sys\n'), ((2981, 3003), 'os.path.join', 'os.path.join', (['rd', '"""io"""'], {}), "(rd, 'io')\n", (2993, 3003), False, 'import os\n'), ((3005, 3029), 'os.path.join', 'os.path.join', (['rd', '"""util"""'], {}), "(rd, 'util')\n", (3017, 3029), False, 'import os\n'), ((3934, 3953), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3951, 3953), False, 'import numpy\n'), ((2503, 2558), 'os.path.join', 'os.path.join', (['rockstar_extdir', '"""rockstar_interface.pyx"""'], {}), "(rockstar_extdir, 'rockstar_interface.pyx')\n", (2515, 2558), False, 'import os\n'), ((2699, 2753), 'os.path.join', 'os.path.join', (['rockstar_extdir', '"""rockstar_groupies.pyx"""'], {}), "(rockstar_extdir, 'rockstar_groupies.pyx')\n", (2711, 2753), False, 'import os\n')] |
from typing import List, Union
import shutil
from pathlib import Path
import numpy as np
from .baserecording import BaseRecording, BaseRecordingSegment
from .core_tools import read_binary_recording, write_binary_recording
from .job_tools import _shared_job_kwargs_doc
class BinaryRecordingExtractor(BaseRecording):
    """
    RecordingExtractor for a binary format
    Parameters
    ----------
    file_paths: str or Path or list
        Path to the binary file
    sampling_frequency: float
        The sampling frequency
    num_chan: int
        Number of channels
    dtype: str or dtype
        The dtype of the binary file
    time_axis: int
        The axis of the time dimension (default 0: F order)
    channel_ids: list (optional)
        A list of channel ids
    file_offset: int (optional)
        Number of bytes in the file to offset by during memmap instantiation.
    gain_to_uV: float or array-like (optional)
        The gain to apply to the traces
    offset_to_uV: float or array-like
        The offset to apply to the traces
    is_filtered: bool or None
        If True, the recording is assumed to be filtered. If None, is_filtered is not set.
    Returns
    -------
    recording: BinaryRecordingExtractor
        The recording Extractor
    """
    extractor_name = 'BinaryRecordingExtractor'
    has_default_locations = False
    installed = True  # check at class level if installed or not
    is_writable = True
    mode = 'file'
    installation_mesg = ""  # error message when not installed

    def __init__(self, file_paths, sampling_frequency, num_chan, dtype, channel_ids=None,
                 time_axis=0, file_offset=0, gain_to_uV=None, offset_to_uV=None,
                 is_filtered=None):
        if channel_ids is None:
            channel_ids = list(range(num_chan))
        else:
            assert len(channel_ids) == num_chan, 'Provided recording channels have the wrong length'
        BaseRecording.__init__(self, sampling_frequency, channel_ids, dtype)
        if isinstance(file_paths, list):
            # several segment
            datfiles = [Path(p) for p in file_paths]
        else:
            # one segment
            datfiles = [Path(file_paths)]
        dtype = np.dtype(dtype)
        # One recording segment per binary file, registered in order.
        for datfile in datfiles:
            rec_segment = BinaryRecordingSegment(datfile, num_chan, dtype, time_axis, file_offset)
            self.add_recording_segment(rec_segment)
        if is_filtered is not None:
            self.annotate(is_filtered=is_filtered)
        if gain_to_uV is not None:
            self.set_channel_gains(gain_to_uV)
        if offset_to_uV is not None:
            self.set_channel_offsets(offset_to_uV)
        # Record the constructor arguments -- presumably used to re-create
        # the extractor on dump/load (confirm against BaseRecording).
        self._kwargs = {'file_paths': [str(e.absolute()) for e in datfiles],
                        'sampling_frequency': sampling_frequency,
                        'num_chan': num_chan, 'dtype': dtype.str,
                        'channel_ids': channel_ids, 'time_axis': time_axis, 'file_offset': file_offset,
                        'gain_to_uV': gain_to_uV, 'offset_to_uV': offset_to_uV,
                        'is_filtered': is_filtered
                        }

    @staticmethod
    def write_recording(recording, file_paths, dtype=None, **job_kwargs):
        """
        Save the traces of a recording extractor in binary .dat format.
        Parameters
        ----------
        recording: RecordingExtractor
            The recording extractor object to be saved in .dat format
        file_paths: str
            The path to the file.
        dtype: dtype
            Type of the saved data. Default float32.
        {}
        """
        write_binary_recording(recording, file_paths=file_paths, dtype=dtype, **job_kwargs)


# Fill the '{}' placeholder left in write_recording's docstring above with
# the shared job-kwargs documentation.
BinaryRecordingExtractor.write_recording.__doc__ = BinaryRecordingExtractor.write_recording.__doc__.format(
    _shared_job_kwargs_doc)
class BinaryRecordingSegment(BaseRecordingSegment):
    """A single recording segment whose traces are read from a binary file."""

    def __init__(self, datfile, num_chan, dtype, time_axis, file_offset):
        BaseRecordingSegment.__init__(self)
        self._timeseries = read_binary_recording(datfile, num_chan, dtype, time_axis, file_offset)

    def get_num_samples(self) -> int:
        """Returns the number of samples in this signal block

        Returns:
            SampleIndex: Number of samples in the signal block
        """
        return self._timeseries.shape[0]

    def get_traces(self,
                   start_frame: Union[int, None] = None,
                   end_frame: Union[int, None] = None,
                   channel_indices: Union[List, None] = None,
                   ) -> np.ndarray:
        """Return the traces for the given frame range and channel selection.

        Unsigned-integer data are converted to float32 and re-centered
        around zero by subtracting half of the dtype's value range.
        """
        traces = self._timeseries[start_frame:end_frame]
        if channel_indices is not None:
            traces = traces[:, channel_indices]
        dt = self._timeseries.dtype
        # BUG FIX: the previous check used dtype.str (e.g. '<u2'), which can
        # never start with 'uint', so this branch was dead code -- and it
        # also referenced self._dtype, an attribute that was never assigned
        # (it would have raised AttributeError). Use kind/itemsize instead.
        if dt.kind == 'u':
            nbits = dt.itemsize * 8
            traces = traces.astype('float32') - 2 ** (nbits - 1)
        return traces
# Historical alias kept for backward compatibility with older code.
BinDatRecordingExtractor = BinaryRecordingExtractor


def read_binary(*args, **kwargs):
    return BinaryRecordingExtractor(*args, **kwargs)


# The wrapper shares the extractor's documentation.
read_binary.__doc__ = BinaryRecordingExtractor.__doc__
| [
"numpy.dtype",
"pathlib.Path"
] | [((2238, 2253), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2246, 2253), True, 'import numpy as np\n'), ((2110, 2117), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (2114, 2117), False, 'from pathlib import Path\n'), ((2203, 2219), 'pathlib.Path', 'Path', (['file_paths'], {}), '(file_paths)\n', (2207, 2219), False, 'from pathlib import Path\n')] |
import pickle
import numpy as np
from profile import Profile
from pathlib import Path as _Path
import os as _os
_path = _Path(_os.path.dirname(_os.path.abspath(__file__)))
class Database:
    """A pickle-backed collection of face profiles.

    Profiles live in ``self.profiles`` (a list) and are re-serialized to
    ``self.file`` (stored next to this module, see ``_path``) after every
    mutation.
    """

    def __init__(self, file):
        self.file = file
        try:
            self.profiles = self.getDatabase()
            print("Loaded from database", flush = True)
        except (EOFError, FileNotFoundError):
            # Brand-new or empty database file: start with no profiles.
            # (FileNotFoundError added so a missing file no longer crashes.)
            self.profiles = []

    def __repr__(self):
        return "{}".format(self.profiles)

    def addProfile(self, profile):
        """Add *profile*; if one with the same name exists, merge the new
        description into the existing profile instead of appending."""
        existing = None
        for prof in self.profiles:
            if prof.name == profile.name:
                existing = prof
                break
        if existing is None:
            self.profiles.append(profile)
        else:
            # BUG FIX: the old code indexed with the enumerate() loop
            # variable *after* the loop finished, so it always updated the
            # last profile in the list instead of the matching one.
            existing.addDescr(profile.descr)
        self.updateDatabase()
        print(self.profiles)

    def updateDatabase(self):
        """Serialize the current profile list to disk."""
        with open(_path / self.file, "wb") as f:
            pickle.dump(self.profiles, f)

    def getDatabase(self):
        """Deserialize and return the profile list from disk."""
        with open(_path / self.file, "rb") as f:
            return pickle.load(f)

    def removeProfile(self, name):
        """Remove every profile whose name equals *name*."""
        # BUG FIX: the old code deleted items from the list while iterating
        # it (which skips elements) and rewrote the database once per
        # deletion. Filter once, persist once.
        remaining = [p for p in self.profiles if p.name != name]
        if len(remaining) != len(self.profiles):
            self.profiles = remaining
            self.updateDatabase()
            print(self.profiles)

    def clear(self):
        """Drop all profiles and persist the empty list."""
        self.profiles = []
        self.updateDatabase()
        print("profiles: {}".format(self.profiles))

    def computeMatches(self, picDescr, profiles):
        """Return the profile whose descriptor is nearest (L2) to *picDescr*.

        Returns a placeholder "Not Recognized" profile when even the nearest
        descriptor is farther than the 0.45 match threshold.
        """
        distances = []
        for prof in profiles:
            distances.append(np.linalg.norm(picDescr - prof.descr))
        distances = np.array(distances)
        print(np.min(distances))
        if np.min(distances) > 0.45:
            return Profile("Not Recognized", None)
        return profiles[np.argmin(distances)]
| [
"pickle.dump",
"profile.Profile",
"pickle.load",
"numpy.linalg.norm",
"numpy.array",
"numpy.argmin",
"numpy.min",
"os.path.abspath"
] | [((144, 170), 'os.path.abspath', '_os.path.abspath', (['__file__'], {}), '(__file__)\n', (160, 170), True, 'import os as _os\n'), ((1971, 1990), 'numpy.array', 'np.array', (['distances'], {}), '(distances)\n', (1979, 1990), True, 'import numpy as np\n'), ((989, 1018), 'pickle.dump', 'pickle.dump', (['self.profiles', 'f'], {}), '(self.profiles, f)\n', (1000, 1018), False, 'import pickle\n'), ((1125, 1139), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1136, 1139), False, 'import pickle\n'), ((2005, 2022), 'numpy.min', 'np.min', (['distances'], {}), '(distances)\n', (2011, 2022), True, 'import numpy as np\n'), ((2035, 2052), 'numpy.min', 'np.min', (['distances'], {}), '(distances)\n', (2041, 2052), True, 'import numpy as np\n'), ((2080, 2111), 'profile.Profile', 'Profile', (['"""Not Recognized"""', 'None'], {}), "('Not Recognized', None)\n", (2087, 2111), False, 'from profile import Profile\n'), ((2136, 2156), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (2145, 2156), True, 'import numpy as np\n'), ((1916, 1949), 'numpy.linalg.norm', 'np.linalg.norm', (['(picDescr - curDes)'], {}), '(picDescr - curDes)\n', (1930, 1949), True, 'import numpy as np\n')] |
"""Rational quadratic kernel."""
from typing import Optional
import numpy as np
import probnum.utils as _utils
from probnum.typing import IntArgType, ScalarArgType
from ._kernel import IsotropicMixin, Kernel
class RatQuad(Kernel, IsotropicMixin):
    r"""Rational quadratic kernel.

    Covariance function given by

    .. math::
        :nowrap:

        \begin{equation}
            k(x_0, x_1)
            = \left(
                1 + \frac{\lVert x_0 - x_1 \rVert_2^2}{2 \alpha l^2}
            \right)^{-\alpha},
        \end{equation}

    with :math:`\alpha > 0`. In the limit :math:`\alpha \rightarrow \infty`
    this kernel recovers the :class:`~probnum.kernels.ExpQuad` kernel.

    Parameters
    ----------
    input_dim :
        Input dimension of the kernel.
    lengthscale :
        Lengthscale :math:`l` of the kernel, i.e. the input scale on which
        the process varies.
    alpha :
        Scale mixture :math:`\alpha`, a positive constant weighting the
        different lengthscales.

    See Also
    --------
    ExpQuad : Exponentiated Quadratic / RBF kernel.

    Examples
    --------
    >>> import numpy as np
    >>> from probnum.kernels import RatQuad
    >>> K = RatQuad(input_dim=1, lengthscale=0.1, alpha=3)
    >>> xs = np.linspace(0, 1, 3)[:, None]
    >>> K(xs[:, None, :], xs[None, :, :])
    array([[1.00000000e+00, 7.25051190e-03, 1.81357765e-04],
           [7.25051190e-03, 1.00000000e+00, 7.25051190e-03],
           [1.81357765e-04, 7.25051190e-03, 1.00000000e+00]])
    """

    def __init__(
        self,
        input_dim: IntArgType,
        lengthscale: ScalarArgType = 1.0,
        alpha: ScalarArgType = 1.0,
    ):
        self.lengthscale = _utils.as_numpy_scalar(lengthscale)
        self.alpha = _utils.as_numpy_scalar(alpha)
        # Reject non-positive (and NaN) scale mixture values.
        if not self.alpha > 0:
            raise ValueError(f"Scale mixture alpha={self.alpha} must be positive.")
        super().__init__(input_dim=input_dim)

    def _evaluate(self, x0: np.ndarray, x1: Optional[np.ndarray] = None) -> np.ndarray:
        # k(x, x) = 1 for every input, so skip the distance computation.
        if x1 is None:
            return np.ones_like(x0[..., 0])
        scaled_sq_dists = self._squared_euclidean_distances(x0, x1) / (
            2.0 * self.alpha * self.lengthscale ** 2
        )
        return (1.0 + scaled_sq_dists) ** -self.alpha
| [
"numpy.ones_like",
"probnum.utils.as_numpy_scalar"
] | [((1747, 1782), 'probnum.utils.as_numpy_scalar', '_utils.as_numpy_scalar', (['lengthscale'], {}), '(lengthscale)\n', (1769, 1782), True, 'import probnum.utils as _utils\n'), ((1804, 1833), 'probnum.utils.as_numpy_scalar', '_utils.as_numpy_scalar', (['alpha'], {}), '(alpha)\n', (1826, 1833), True, 'import probnum.utils as _utils\n'), ((2126, 2150), 'numpy.ones_like', 'np.ones_like', (['x0[..., 0]'], {}), '(x0[..., 0])\n', (2138, 2150), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import numpy as np
import logging
from constant import *
import utility
from synset2vec_hier import get_synset_encoder
# Module-level logger; basicConfig installs a timestamped format (with file
# name and line number) on the root handler, and this module logs at INFO.
logger = logging.getLogger(__file__)
logging.basicConfig(
    format="[%(asctime)s - %(filename)s:line %(lineno)s] %(message)s",
    datefmt='%d %b %H:%M:%S')
logger.setLevel(logging.INFO)
def process(options, label_set):
    """Encode every synset of *label_set* and write the results to disk.

    Outputs three files under ``rootpath/synset2vec/<label_set>/...``:
    ``feature.bin`` (float32 vectors), ``id.txt`` (synsets that produced a
    vector) and ``shape.txt`` (matrix shape).

    Returns 0 without doing any work when the output already exists and
    ``options.overwrite`` is falsy; otherwise returns None.
    """
    overwrite = options.overwrite
    rootpath = options.rootpath
    w2v_corpus = options.w2v_corpus
    w2v = options.w2v
    embedding = options.embedding
    resdir = os.path.join(rootpath, 'synset2vec', label_set, '%s,%s,%s' % (w2v_corpus, w2v, embedding))
    resfile = os.path.join(resdir, 'feature.bin')
    if os.path.exists(resfile) and not overwrite:
        logger.info('%s exists. quit', resfile)
        return 0
    synset_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'synsets_%s.txt' % label_set)
    # Read the synset ids via a context manager so the handle is closed
    # (the original left the opened file object dangling).
    with open(synset_file) as fr:
        synsets = [line.strip() for line in fr]
    s2v = get_synset_encoder(embedding)(w2v_corpus, w2v, rootpath=rootpath)
    utility.makedirsforfile(resfile)
    good = []  # synsets for which an embedding was actually produced
    with open(resfile, 'wb') as fw:
        for wnid in synsets:
            vec = s2v.embedding(wnid)
            if vec is not None:
                # float32 keeps the binary feature file compact.
                np.array(vec, dtype=np.float32).tofile(fw)
                good.append(wnid)
    # len(synsets) equals the original's (i + 1) counter, but is also
    # correct for an empty synset list (which previously raised NameError
    # because the loop variable `i` was never bound).
    logger.info('%d done, %d okay' % (len(synsets), len(good)))
    with open(os.path.join(resdir, 'id.txt'), 'w') as fw:
        fw.write(' '.join(good))
    with open(os.path.join(resdir, 'shape.txt'), 'w') as fw:
        fw.write('%d %d' % (len(good), s2v.get_feat_dim()))
def main(argv=None):
    """Command-line entry point: parse options and dispatch to process()."""
    cli_args = sys.argv[1:] if argv is None else argv
    from optparse import OptionParser
    parser = OptionParser(usage="""usage: %prog [options] label_set""")
    parser.add_option("--overwrite", default=0, type="int", help="overwrite existing file (default: 0)")
    parser.add_option("--rootpath", default=ROOT_PATH, type="string", help="rootpath (default: %s)" % ROOT_PATH)
    parser.add_option("--w2v_corpus", default=DEFAULT_W2V_CORPUS, type="string", help="corpus using which word2vec is trained (default: %s)" % DEFAULT_W2V_CORPUS)
    parser.add_option("--w2v", default=DEFAULT_W2V, type="string", help="word2vec model (default: %s)" % DEFAULT_W2V)
    parser.add_option("--embedding", default=DEFAULT_EMBEDDING, type="string", help="embedding model (default: %s)" % DEFAULT_EMBEDDING)
    options, positional = parser.parse_args(cli_args)
    # A label_set positional argument is required.
    if not positional:
        parser.print_help()
        return 1
    return process(options, positional[0])
# When run as a script, exit with the return code of main().
if __name__ == "__main__":
    sys.exit(main())
| [
"logging.getLogger",
"utility.makedirsforfile",
"logging.basicConfig",
"os.path.exists",
"os.path.join",
"optparse.OptionParser",
"os.path.realpath",
"numpy.array",
"synset2vec_hier.get_synset_encoder"
] | [((261, 288), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (278, 288), False, 'import logging\n'), ((289, 411), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s - %(filename)s:line %(lineno)s] %(message)s"""', 'datefmt': '"""%d %b %H:%M:%S"""'}), "(format=\n '[%(asctime)s - %(filename)s:line %(lineno)s] %(message)s', datefmt=\n '%d %b %H:%M:%S')\n", (308, 411), False, 'import logging\n'), ((651, 745), 'os.path.join', 'os.path.join', (['rootpath', '"""synset2vec"""', 'label_set', "('%s,%s,%s' % (w2v_corpus, w2v, embedding))"], {}), "(rootpath, 'synset2vec', label_set, '%s,%s,%s' % (w2v_corpus,\n w2v, embedding))\n", (663, 745), False, 'import os\n'), ((756, 791), 'os.path.join', 'os.path.join', (['resdir', '"""feature.bin"""'], {}), "(resdir, 'feature.bin')\n", (768, 791), False, 'import os\n'), ((1165, 1197), 'utility.makedirsforfile', 'utility.makedirsforfile', (['resfile'], {}), '(resfile)\n', (1188, 1197), False, 'import utility\n'), ((2022, 2076), 'optparse.OptionParser', 'OptionParser', ([], {'usage': '"""usage: %prog [options] label_set"""'}), "(usage='usage: %prog [options] label_set')\n", (2034, 2076), False, 'from optparse import OptionParser\n'), ((799, 822), 'os.path.exists', 'os.path.exists', (['resfile'], {}), '(resfile)\n', (813, 822), False, 'import os\n'), ((1092, 1121), 'synset2vec_hier.get_synset_encoder', 'get_synset_encoder', (['embedding'], {}), '(embedding)\n', (1110, 1121), False, 'from synset2vec_hier import get_synset_encoder\n'), ((955, 981), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (971, 981), False, 'import os\n'), ((1659, 1689), 'os.path.join', 'os.path.join', (['resdir', '"""id.txt"""'], {}), "(resdir, 'id.txt')\n", (1671, 1689), False, 'import os\n'), ((1770, 1803), 'os.path.join', 'os.path.join', (['resdir', '"""shape.txt"""'], {}), "(resdir, 'shape.txt')\n", (1782, 1803), False, 'import os\n'), ((1464, 1495), 'numpy.array', 
'np.array', (['vec'], {'dtype': 'np.float32'}), '(vec, dtype=np.float32)\n', (1472, 1495), True, 'import numpy as np\n')] |
"""GaussianCNNModel."""
import numpy as np
import tensorflow as tf
from garage.tf.distributions import DiagonalGaussian
from garage.tf.models.cnn import cnn
from garage.tf.models.mlp import mlp
from garage.tf.models.model import Model
from garage.tf.models.parameter import parameter
class GaussianCNNModel(Model):
    """GaussianCNNModel.
    Args:
        filter_dims(tuple[int]): Dimension of the filters. For example,
            (3, 5) means there are two convolutional layers. The filter
            for first layer is of dimension (3 x 3) and the second one is of
            dimension (5 x 5).
        num_filters(tuple[int]): Number of filters. For example, (3, 32) means
            there are two convolutional layers. The filter for the first layer
            has 3 channels and the second one with 32 channels.
        strides(tuple[int]): The stride of the sliding window. For example,
            (1, 2) means there are two convolutional layers. The stride of the
            filter for first layer is 1 and that of the second layer is 2.
        padding (str): The type of padding algorithm to use,
            either 'SAME' or 'VALID'.
        output_dim (int): Output dimension of the model.
        name (str): Model name, also the variable scope.
        hidden_sizes (list[int]): Output dimension of dense layer(s) for
            the Convolutional model for mean. For example, (32, 32) means the
            network consists of two dense layers, each with 32 hidden units.
        hidden_nonlinearity (callable): Activation function for intermediate
            dense layer(s). It should return a tf.Tensor. Set it to
            None to maintain a linear activation.
        hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s). The function should return a
            tf.Tensor.
        output_nonlinearity (callable): Activation function for output dense
            layer. It should return a tf.Tensor. Set it to None to
            maintain a linear activation.
        output_w_init (callable): Initializer function for the weight
            of output dense layer(s). The function should return a
            tf.Tensor.
        output_b_init (callable): Initializer function for the bias
            of output dense layer(s). The function should return a
            tf.Tensor.
        learn_std (bool): Is std trainable.
        init_std (float): Initial value for std.
        adaptive_std (bool): Is std a neural network. If False, it will be a
            parameter.
        std_share_network (bool): Boolean for whether mean and std share
            the same network.
        std_filter_dims(tuple[int]): Dimension of the filters. For example,
            (3, 5) means there are two convolutional layers. The filter
            for first layer is of dimension (3 x 3) and the second one is of
            dimension (5 x 5).
        std_num_filters(tuple[int]): Number of filters. For example, (3, 32)
            means there are two convolutional layers. The filter for the first
            layer has 3 channels and the second one with 32 channels.
        std_strides(tuple[int]): The stride of the sliding window. For example,
            (1, 2) means there are two convolutional layers. The stride of the
            filter for first layer is 1 and that of the second layer is 2.
        std_padding (str): The type of padding algorithm to use in std network,
            either 'SAME' or 'VALID'.
        std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
            the Conv for std. For example, (32, 32) means the Conv consists
            of two hidden layers, each with 32 hidden units.
        min_std (float): If not None, the std is at least the value of min_std,
            to avoid numerical issues.
        max_std (float): If not None, the std is at most the value of max_std,
            to avoid numerical issues.
        std_hidden_nonlinearity (callable): Nonlinearity for each hidden layer
            in the std network.
        std_hidden_w_init (callable): Initializer function for the weight
            of intermediate dense layer(s) in the std network.
        std_hidden_b_init (callable): Initializer function for the bias
            of intermediate dense layer(s) in the std network.
        std_output_nonlinearity (callable): Activation function for output
            dense layer in the std network. It should return a tf.Tensor. Set
            it to None to maintain a linear activation.
        std_output_w_init (callable): Initializer function for the weight
            of output dense layer(s) in the std network.
        std_parameterization (str): How the std should be parametrized. There
            are two options:
            - exp: the logarithm of the std will be stored, and applied a
                exponential transformation
            - softplus: the std will be computed as log(1+exp(x))
        layer_normalization (bool): Bool for using layer normalization or not.
    """
    def __init__(self,
                 output_dim,
                 filter_dims,
                 num_filters,
                 strides,
                 padding,
                 hidden_sizes,
                 name=None,
                 hidden_nonlinearity=tf.nn.tanh,
                 hidden_w_init=tf.initializers.glorot_uniform(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_nonlinearity=None,
                 output_w_init=tf.initializers.glorot_uniform(),
                 output_b_init=tf.zeros_initializer(),
                 learn_std=True,
                 adaptive_std=False,
                 std_share_network=False,
                 init_std=1.0,
                 min_std=1e-6,
                 max_std=None,
                 std_filter_dims=(),
                 std_num_filters=(),
                 std_strides=(),
                 std_padding='SAME',
                 std_hidden_sizes=(32, 32),
                 std_hidden_nonlinearity=tf.nn.tanh,
                 std_hidden_w_init=tf.initializers.glorot_uniform(),
                 std_hidden_b_init=tf.zeros_initializer(),
                 std_output_nonlinearity=None,
                 std_output_w_init=tf.initializers.glorot_uniform(),
                 std_parameterization='exp',
                 layer_normalization=False):
        """Store hyperparameters and precompute the std parameterization.

        See the class docstring for the meaning of every argument.
        NOTE: the ``tf.initializers.*()`` defaults are evaluated once at
        class-definition time, so all instances that rely on the defaults
        share the same initializer objects.
        """
        # Network parameters
        super().__init__(name)
        self._output_dim = output_dim
        self._num_filters = num_filters
        self._filter_dims = filter_dims
        self._strides = strides
        self._padding = padding
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._learn_std = learn_std
        self._adaptive_std = adaptive_std
        self._std_share_network = std_share_network
        self._init_std = init_std
        self._min_std = min_std
        self._max_std = max_std
        self._std_num_filters = std_num_filters
        self._std_filter_dims = std_filter_dims
        self._std_strides = std_strides
        self._std_padding = std_padding
        self._std_hidden_sizes = std_hidden_sizes
        self._std_hidden_nonlinearity = std_hidden_nonlinearity
        self._std_hidden_w_init = std_hidden_w_init
        self._std_hidden_b_init = std_hidden_b_init
        self._std_output_nonlinearity = std_output_nonlinearity
        self._std_output_w_init = std_output_w_init
        self._std_parameterization = std_parameterization
        self._layer_normalization = layer_normalization
        # Transform std arguments to parameterized space
        self._init_std_param = None
        self._min_std_param = None
        self._max_std_param = None
        if self._std_parameterization == 'exp':
            # 'exp' stores log(std) directly.
            self._init_std_param = np.log(init_std)
            if min_std is not None:
                self._min_std_param = np.log(min_std)
            if max_std is not None:
                self._max_std_param = np.log(max_std)
        elif self._std_parameterization == 'softplus':
            # 'softplus' stores the inverse softplus of std: log(exp(std) - 1).
            self._init_std_param = np.log(np.exp(init_std) - 1)
            if min_std is not None:
                self._min_std_param = np.log(np.exp(min_std) - 1)
            if max_std is not None:
                self._max_std_param = np.log(np.exp(max_std) - 1)
        else:
            raise NotImplementedError
    def network_output_spec(self):
        """Network output spec.
        Return:
            list[str]: List of key(str) for the network outputs.
        """
        return ['sample', 'mean', 'log_std', 'std_param', 'dist']
    # pylint: disable=arguments-differ
    def _build(self, state_input, name=None):
        """Build model given input placeholder(s).
        Args:
            state_input (tf.Tensor): Place holder for state input.
            name (str): Inner model name, also the variable scope of the
                inner model, if exist. One example is
                garage.tf.models.Sequential.
        Return:
            tf.Tensor: Sampled action.
            tf.Tensor: Mean.
            tf.Tensor: Parameterized log_std.
            tf.Tensor: log_std.
            garage.tf.distributions.DiagonalGaussian: Policy distribution.
        """
        del name
        action_dim = self._output_dim
        with tf.compat.v1.variable_scope('dist_params'):
            if self._std_share_network:
                # mean and std networks share an CNN
                # Output bias: zeros for the mean half, init_std_param for
                # the std half of the concatenated output.
                b = np.concatenate([
                    np.zeros(action_dim),
                    np.full(action_dim, self._init_std_param)
                ], axis=0)  # yapf: disable
                mean_std_conv = cnn(
                    input_var=state_input,
                    filter_dims=self._filter_dims,
                    hidden_nonlinearity=self._hidden_nonlinearity,
                    hidden_w_init=self._hidden_w_init,
                    hidden_b_init=self._hidden_b_init,
                    num_filters=self._num_filters,
                    strides=self._strides,
                    padding=self._padding,
                    name='mean_std_cnn')
                mean_std_network = mlp(
                    mean_std_conv,
                    output_dim=action_dim * 2,
                    hidden_sizes=self._hidden_sizes,
                    hidden_nonlinearity=self._hidden_nonlinearity,
                    hidden_w_init=self._hidden_w_init,
                    hidden_b_init=self._hidden_b_init,
                    output_nonlinearity=self._output_nonlinearity,
                    output_w_init=self._output_w_init,
                    output_b_init=tf.constant_initializer(b),
                    name='mean_std_network',
                    layer_normalization=self._layer_normalization)
                # Split the 2*action_dim output into mean / log_std halves.
                with tf.compat.v1.variable_scope('mean_network'):
                    mean_network = mean_std_network[..., :action_dim]
                with tf.compat.v1.variable_scope('log_std_network'):
                    log_std_network = mean_std_network[..., action_dim:]
            else:
                # separate MLPs for mean and std networks
                # mean network
                mean_conv = cnn(input_var=state_input,
                                filter_dims=self._filter_dims,
                                hidden_nonlinearity=self._hidden_nonlinearity,
                                hidden_w_init=self._hidden_w_init,
                                hidden_b_init=self._hidden_b_init,
                                num_filters=self._num_filters,
                                strides=self._strides,
                                padding=self._padding,
                                name='mean_cnn')
                mean_network = mlp(
                    mean_conv,
                    output_dim=action_dim,
                    hidden_sizes=self._hidden_sizes,
                    hidden_nonlinearity=self._hidden_nonlinearity,
                    hidden_w_init=self._hidden_w_init,
                    hidden_b_init=self._hidden_b_init,
                    output_nonlinearity=self._output_nonlinearity,
                    output_w_init=self._output_w_init,
                    output_b_init=self._output_b_init,
                    name='mean_network',
                    layer_normalization=self._layer_normalization)
                # std network
                if self._adaptive_std:
                    # State-dependent std: its own CNN + MLP head.
                    log_std_conv = cnn(
                        input_var=state_input,
                        filter_dims=self._std_filter_dims,
                        hidden_nonlinearity=self._std_hidden_nonlinearity,
                        hidden_w_init=self._std_hidden_w_init,
                        hidden_b_init=self._std_hidden_b_init,
                        num_filters=self._std_num_filters,
                        strides=self._std_strides,
                        padding=self._std_padding,
                        name='log_std_cnn')
                    log_std_network = mlp(
                        log_std_conv,
                        output_dim=action_dim,
                        hidden_sizes=self._std_hidden_sizes,
                        hidden_nonlinearity=self._std_hidden_nonlinearity,
                        hidden_w_init=self._std_hidden_w_init,
                        hidden_b_init=self._std_hidden_b_init,
                        output_nonlinearity=self._std_output_nonlinearity,
                        output_w_init=self._std_output_w_init,
                        output_b_init=tf.constant_initializer(
                            self._init_std_param),
                        name='log_std_network',
                        layer_normalization=self._layer_normalization)
                else:
                    # State-independent std: a single (optionally trainable)
                    # parameter vector broadcast over the batch.
                    log_std_network = parameter(
                        input_var=state_input,
                        length=action_dim,
                        initializer=tf.constant_initializer(
                            self._init_std_param),
                        trainable=self._learn_std,
                        name='log_std_network')
        mean_var = mean_network
        std_param = log_std_network
        # Clamp the std parameter into the configured [min, max] range.
        with tf.compat.v1.variable_scope('std_limits'):
            if self._min_std_param is not None:
                std_param = tf.maximum(std_param, self._min_std_param)
            if self._max_std_param is not None:
                std_param = tf.minimum(std_param, self._max_std_param)
        with tf.compat.v1.variable_scope('std_parameterization'):
            # build std_var with std parameterization
            if self._std_parameterization == 'exp':
                log_std_var = std_param
            else:  # we know it must be softplus here
                # log_std = log(softplus(std_param)) = log(log(1 + exp(x)))
                log_std_var = tf.math.log(tf.math.log(1. + tf.exp(std_param)))
        dist = DiagonalGaussian(self._output_dim)
        # Reparameterization: sample = mean + exp(log_std) * N(0, 1) noise.
        rnd = tf.random.normal(shape=mean_var.get_shape().as_list()[1:])
        action_var = rnd * tf.exp(log_std_var) + mean_var
        return action_var, mean_var, log_std_var, std_param, dist
| [
"tensorflow.compat.v1.variable_scope",
"tensorflow.maximum",
"numpy.full",
"tensorflow.initializers.glorot_uniform",
"numpy.log",
"garage.tf.models.cnn.cnn",
"garage.tf.models.mlp.mlp",
"numpy.exp",
"tensorflow.exp",
"numpy.zeros",
"tensorflow.constant_initializer",
"tensorflow.zeros_initializ... | [((5617, 5649), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (5647, 5649), True, 'import tensorflow as tf\n'), ((5683, 5705), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (5703, 5705), True, 'import tensorflow as tf\n'), ((5783, 5815), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (5813, 5815), True, 'import tensorflow as tf\n'), ((5849, 5871), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (5869, 5871), True, 'import tensorflow as tf\n'), ((6367, 6399), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (6397, 6399), True, 'import tensorflow as tf\n'), ((6437, 6459), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (6457, 6459), True, 'import tensorflow as tf\n'), ((6545, 6577), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (6575, 6577), True, 'import tensorflow as tf\n'), ((15552, 15586), 'garage.tf.distributions.DiagonalGaussian', 'DiagonalGaussian', (['self._output_dim'], {}), '(self._output_dim)\n', (15568, 15586), False, 'from garage.tf.distributions import DiagonalGaussian\n'), ((8380, 8396), 'numpy.log', 'np.log', (['init_std'], {}), '(init_std)\n', (8386, 8396), True, 'import numpy as np\n'), ((9936, 9978), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""dist_params"""'], {}), "('dist_params')\n", (9963, 9978), True, 'import tensorflow as tf\n'), ((14896, 14937), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""std_limits"""'], {}), "('std_limits')\n", (14923, 14937), True, 'import tensorflow as tf\n'), ((15197, 15248), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""std_parameterization"""'], {}), "('std_parameterization')\n", (15224, 15248), True, 'import tensorflow as tf\n'), 
((8473, 8488), 'numpy.log', 'np.log', (['min_std'], {}), '(min_std)\n', (8479, 8488), True, 'import numpy as np\n'), ((8565, 8580), 'numpy.log', 'np.log', (['max_std'], {}), '(max_std)\n', (8571, 8580), True, 'import numpy as np\n'), ((10299, 10590), 'garage.tf.models.cnn.cnn', 'cnn', ([], {'input_var': 'state_input', 'filter_dims': 'self._filter_dims', 'hidden_nonlinearity': 'self._hidden_nonlinearity', 'hidden_w_init': 'self._hidden_w_init', 'hidden_b_init': 'self._hidden_b_init', 'num_filters': 'self._num_filters', 'strides': 'self._strides', 'padding': 'self._padding', 'name': '"""mean_std_cnn"""'}), "(input_var=state_input, filter_dims=self._filter_dims,\n hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self.\n _hidden_w_init, hidden_b_init=self._hidden_b_init, num_filters=self.\n _num_filters, strides=self._strides, padding=self._padding, name=\n 'mean_std_cnn')\n", (10302, 10590), False, 'from garage.tf.models.cnn import cnn\n'), ((11845, 12132), 'garage.tf.models.cnn.cnn', 'cnn', ([], {'input_var': 'state_input', 'filter_dims': 'self._filter_dims', 'hidden_nonlinearity': 'self._hidden_nonlinearity', 'hidden_w_init': 'self._hidden_w_init', 'hidden_b_init': 'self._hidden_b_init', 'num_filters': 'self._num_filters', 'strides': 'self._strides', 'padding': 'self._padding', 'name': '"""mean_cnn"""'}), "(input_var=state_input, filter_dims=self._filter_dims,\n hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self.\n _hidden_w_init, hidden_b_init=self._hidden_b_init, num_filters=self.\n _num_filters, strides=self._strides, padding=self._padding, name='mean_cnn'\n )\n", (11848, 12132), False, 'from garage.tf.models.cnn import cnn\n'), ((12412, 12806), 'garage.tf.models.mlp.mlp', 'mlp', (['mean_conv'], {'output_dim': 'action_dim', 'hidden_sizes': 'self._hidden_sizes', 'hidden_nonlinearity': 'self._hidden_nonlinearity', 'hidden_w_init': 'self._hidden_w_init', 'hidden_b_init': 'self._hidden_b_init', 'output_nonlinearity': 
'self._output_nonlinearity', 'output_w_init': 'self._output_w_init', 'output_b_init': 'self._output_b_init', 'name': '"""mean_network"""', 'layer_normalization': 'self._layer_normalization'}), "(mean_conv, output_dim=action_dim, hidden_sizes=self._hidden_sizes,\n hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self.\n _hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=\n self._output_nonlinearity, output_w_init=self._output_w_init,\n output_b_init=self._output_b_init, name='mean_network',\n layer_normalization=self._layer_normalization)\n", (12415, 12806), False, 'from garage.tf.models.mlp import mlp\n'), ((15017, 15059), 'tensorflow.maximum', 'tf.maximum', (['std_param', 'self._min_std_param'], {}), '(std_param, self._min_std_param)\n', (15027, 15059), True, 'import tensorflow as tf\n'), ((15138, 15180), 'tensorflow.minimum', 'tf.minimum', (['std_param', 'self._max_std_param'], {}), '(std_param, self._max_std_param)\n', (15148, 15180), True, 'import tensorflow as tf\n'), ((15689, 15708), 'tensorflow.exp', 'tf.exp', (['log_std_var'], {}), '(log_std_var)\n', (15695, 15708), True, 'import tensorflow as tf\n'), ((11444, 11487), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""mean_network"""'], {}), "('mean_network')\n", (11471, 11487), True, 'import tensorflow as tf\n'), ((11582, 11628), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""log_std_network"""'], {}), "('log_std_network')\n", (11609, 11628), True, 'import tensorflow as tf\n'), ((13126, 13444), 'garage.tf.models.cnn.cnn', 'cnn', ([], {'input_var': 'state_input', 'filter_dims': 'self._std_filter_dims', 'hidden_nonlinearity': 'self._std_hidden_nonlinearity', 'hidden_w_init': 'self._std_hidden_w_init', 'hidden_b_init': 'self._std_hidden_b_init', 'num_filters': 'self._std_num_filters', 'strides': 'self._std_strides', 'padding': 'self._std_padding', 'name': '"""log_std_cnn"""'}), "(input_var=state_input, 
filter_dims=self._std_filter_dims,\n hidden_nonlinearity=self._std_hidden_nonlinearity, hidden_w_init=self.\n _std_hidden_w_init, hidden_b_init=self._std_hidden_b_init, num_filters=\n self._std_num_filters, strides=self._std_strides, padding=self.\n _std_padding, name='log_std_cnn')\n", (13129, 13444), False, 'from garage.tf.models.cnn import cnn\n'), ((8680, 8696), 'numpy.exp', 'np.exp', (['init_std'], {}), '(init_std)\n', (8686, 8696), True, 'import numpy as np\n'), ((10134, 10154), 'numpy.zeros', 'np.zeros', (['action_dim'], {}), '(action_dim)\n', (10142, 10154), True, 'import numpy as np\n'), ((10177, 10218), 'numpy.full', 'np.full', (['action_dim', 'self._init_std_param'], {}), '(action_dim, self._init_std_param)\n', (10184, 10218), True, 'import numpy as np\n'), ((11280, 11306), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['b'], {}), '(b)\n', (11303, 11306), True, 'import tensorflow as tf\n'), ((8785, 8800), 'numpy.exp', 'np.exp', (['min_std'], {}), '(min_std)\n', (8791, 8800), True, 'import numpy as np\n'), ((8889, 8904), 'numpy.exp', 'np.exp', (['max_std'], {}), '(max_std)\n', (8895, 8904), True, 'import numpy as np\n'), ((14230, 14275), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self._init_std_param'], {}), '(self._init_std_param)\n', (14253, 14275), True, 'import tensorflow as tf\n'), ((14630, 14675), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self._init_std_param'], {}), '(self._init_std_param)\n', (14653, 14675), True, 'import tensorflow as tf\n'), ((15514, 15531), 'tensorflow.exp', 'tf.exp', (['std_param'], {}), '(std_param)\n', (15520, 15531), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 15:15:03 2020
@author: <NAME>
Plot to vizualise the effect of background normalization
"""
# Import library
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
import numpy as np
# Function
def raw_vs_normalized_plot(adata, out_dir, log=True):
    '''
    Plot raw vs normalized density curves for every marker in `adata`.
    Parameters:
        adata- AnnData object created using the function df_to_annobject.
        out_dir: Directory to which the images should be saved.
        log: Bool. If True, then the data will be converted to log scale.
    Returns:
        PDF plots that will be saved in the specified directory
    Example:
        raw_vs_normalized_plot (adata, out_dir = "C:/Users/ajit/plots", log=True)
    '''
    def plot_normalization_figure(adata, marker, out_dir):
        # Column index of the marker of interest in the var (feature) axis.
        m_idx = adata.var.index.tolist().index(marker)
        # Raw data.
        # BUG FIX: the original used adata.raw.X[m_idx], which selects a
        # *row* (one observation) of the raw obs-by-var matrix, not the
        # marker's column. Use the same column indexing as the normalized
        # matrix below. TODO(review): confirm adata.raw.X is obs x var here.
        data = np.array(adata.raw.X[:, m_idx]).flatten()
        # Normalized data.
        n_data = np.array(adata.X[:, m_idx]).flatten()
        # Overlaid density curves before/after normalization.
        sns.set_style("white")
        if log:
            sns.distplot(np.log1p(data), hist=False, rug=False, label="Before Normalization")
            sns.distplot(np.log1p(n_data), hist=False, rug=False, label="After Normalization")
        else:
            sns.distplot(data, hist=False, rug=False, label="Before Normalization")
            sns.distplot(n_data, hist=False, rug=False, label="After Normalization")
        plt.savefig(out_dir + "/" + marker + ".pdf")
        plt.clf()
    # One figure per marker; a plain loop replaces the original
    # list(map(...)) whose result was never used.
    for marker in adata.var.index:
        plot_normalization_figure(adata, marker, out_dir)
| [
"seaborn.set",
"matplotlib.pyplot.savefig",
"seaborn.distplot",
"matplotlib.pyplot.clf",
"seaborn.set_style",
"numpy.array",
"numpy.log1p"
] | [((182, 207), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (189, 207), True, 'import seaborn as sns\n'), ((1114, 1136), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (1127, 1136), True, 'import seaborn as sns\n'), ((1555, 1599), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_dir + '/' + marker + '.pdf')"], {}), "(out_dir + '/' + marker + '.pdf')\n", (1566, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1617), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1615, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1383, 1454), 'seaborn.distplot', 'sns.distplot', (['data'], {'hist': '(False)', 'rug': '(False)', 'label': '"""Before Normalization"""'}), "(data, hist=False, rug=False, label='Before Normalization')\n", (1395, 1454), True, 'import seaborn as sns\n'), ((1473, 1545), 'seaborn.distplot', 'sns.distplot', (['n_data'], {'hist': '(False)', 'rug': '(False)', 'label': '"""After Normalization"""'}), "(n_data, hist=False, rug=False, label='After Normalization')\n", (1485, 1545), True, 'import seaborn as sns\n'), ((971, 999), 'numpy.array', 'np.array', (['adata.raw.X[m_idx]'], {}), '(adata.raw.X[m_idx])\n', (979, 999), True, 'import numpy as np\n'), ((1054, 1081), 'numpy.array', 'np.array', (['adata.X[:, m_idx]'], {}), '(adata.X[:, m_idx])\n', (1062, 1081), True, 'import numpy as np\n'), ((1186, 1200), 'numpy.log1p', 'np.log1p', (['data'], {}), '(data)\n', (1194, 1200), True, 'import numpy as np\n'), ((1286, 1302), 'numpy.log1p', 'np.log1p', (['n_data'], {}), '(n_data)\n', (1294, 1302), True, 'import numpy as np\n')] |
import numpy as np
from logbook import Logger
from catalyst.constants import LOG_LEVEL
from catalyst.protocol import Portfolio, Positions, Position
log = Logger('ExchangePortfolio', level=LOG_LEVEL)
class ExchangePortfolio(Portfolio):
    """
    Since the goal is to support multiple exchanges, it makes sense to
    include additional stats in the portfolio object. This fills the role
    of Blotter and Portfolio in live mode.
    Instead of relying on the performance tracker, each exchange portfolio
    tracks its own holding. This offers a separation between tracking an
    exchange and the statistics of the algorithm.
    """

    def __init__(self, start_date, starting_cash=None):
        self.capital_used = 0.0
        self.starting_cash = starting_cash
        self.portfolio_value = starting_cash
        self.pnl = 0.0
        self.returns = 0.0
        self.cash = starting_cash
        self.positions = Positions()
        self.start_date = start_date
        self.positions_value = 0.0
        # Mapping of asset -> list of open Order objects.
        self.open_orders = dict()

    def create_order(self, order):
        """
        Create an open order and store in memory.

        Parameters
        ----------
        order: Order
        """
        log.debug('creating order {}'.format(order.id))

        # BUG FIX: the original tested `order.asset is self.open_orders`,
        # an identity comparison between an asset and the dict itself,
        # which is always False -- so any previously tracked open orders
        # for the asset were silently discarded on each new order.
        # Membership (`in`), as used for self.positions below, is the
        # intended check.
        open_orders = self.open_orders[order.asset] \
            if order.asset in self.open_orders else []
        open_orders.append(order)
        self.open_orders[order.asset] = open_orders

        order_position = self.positions[order.asset] \
            if order.asset in self.positions else None
        if order_position is None:
            order_position = Position(order.asset)
            self.positions[order.asset] = order_position
        order_position.amount += order.amount

        log.debug('open order added to portfolio')

    def _remove_open_order(self, order):
        # Drop `order` from its asset's open-order list; raise if the
        # asset has no open-order list at all.
        try:
            open_orders = self.open_orders[order.asset]
            if order in open_orders:
                open_orders.remove(order)
        except Exception:
            raise ValueError(
                'unable to clear order not found in open order list.'
            )

    def execute_order(self, order, transaction):
        """
        Update the open orders and positions to apply an executed order.

        Unlike with backtesting, we do not need to add slippage and fees.
        The executed price includes transaction fees.

        Parameters
        ----------
        order: Order
        transaction: Transaction
        """
        log.debug('executing order {}'.format(order.id))
        self._remove_open_order(order)

        order_position = self.positions[order.asset] \
            if order.asset in self.positions else None
        if order_position is None:
            raise ValueError(
                'Trying to execute order for a position not held:'
                ' {}'.format(order.id)
            )

        self.capital_used += order.amount * transaction.price
        if order.amount > 0:
            if order_position.cost_basis > 0:
                # Weighted average of the existing cost basis and the
                # executed price, weighted by position / order size.
                order_position.cost_basis = np.average(
                    [order_position.cost_basis, transaction.price],
                    weights=[order_position.amount, order.amount]
                )
            else:
                order_position.cost_basis = transaction.price

        log.debug('updated portfolio with executed order')

    def remove_order(self, order):
        """
        Removing an open order.

        Parameters
        ----------
        order: Order
        """
        log.info('removing cancelled order {}'.format(order.id))
        self._remove_open_order(order)

        order_position = self.positions[order.asset] \
            if order.asset in self.positions else None
        if order_position is None:
            raise ValueError(
                'Trying to remove order for a position not held: %s' % order.id
            )
        # NOTE(review): this reverses the full order amount even if the
        # order was partially filled -- confirm against caller behavior.
        order_position.amount -= order.amount

        log.debug('removed order from portfolio')
| [
"catalyst.protocol.Positions",
"logbook.Logger",
"catalyst.protocol.Position",
"numpy.average"
] | [((156, 200), 'logbook.Logger', 'Logger', (['"""ExchangePortfolio"""'], {'level': 'LOG_LEVEL'}), "('ExchangePortfolio', level=LOG_LEVEL)\n", (162, 200), False, 'from logbook import Logger\n'), ((928, 939), 'catalyst.protocol.Positions', 'Positions', ([], {}), '()\n', (937, 939), False, 'from catalyst.protocol import Portfolio, Positions, Position\n'), ((1647, 1668), 'catalyst.protocol.Position', 'Position', (['order.asset'], {}), '(order.asset)\n', (1655, 1668), False, 'from catalyst.protocol import Portfolio, Positions, Position\n'), ((3101, 3211), 'numpy.average', 'np.average', (['[order_position.cost_basis, transaction.price]'], {'weights': '[order_position.amount, order.amount]'}), '([order_position.cost_basis, transaction.price], weights=[\n order_position.amount, order.amount])\n', (3111, 3211), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Some code is from https://github.com/princeton-vl/pose-ae-train/blob/454d4ba113bbb9775d4dc259ef5e6c07c2ceed54/utils/group.py
# Written by <NAME> (<EMAIL>)
# Modified by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from munkres import Munkres
import numpy as np
import torch
def py_max_match(scores):
    """Solve the assignment problem on the cost matrix `scores`.

    Uses the Hungarian algorithm (Munkres) and returns the chosen
    (row, col) index pairs as an int32 numpy array.
    """
    solver = Munkres()
    assignment = solver.compute(scores)
    return np.asarray(assignment, dtype=np.int32)
def match_by_tag(inp, params):
    """Group top-k joint detections into people via associative-embedding tags.

    `inp` is a (tag_k, loc_k, val_k) triple for a single image; `params` is a
    Params instance. Returns an array of shape
    (num_people, num_joints, 3 + tag_dim) where each joint row is
    (x, y, score, tag...).
    """
    assert isinstance(params, Params), 'params should be class Params()'
    tag_k, loc_k, val_k = inp
    # Template block for a newly started person (all joints zeroed).
    default_ = np.zeros((params.num_joints, 3 + tag_k.shape[2]))
    joint_dict = {}  # person key -> (num_joints, 3 + tag_dim) joint array
    tag_dict = {}  # person key -> list of tag vectors collected so far
    for i in range(params.num_joints):
        idx = params.joint_order[i]
        tags = tag_k[idx]
        # Each candidate row: (x, y, score, tag...).
        joints = np.concatenate(
            (loc_k[idx], val_k[idx, :, None], tags), 1
        )
        mask = joints[:, 2] > params.detection_threshold
        tags = tags[mask]
        joints = joints[mask]
        if joints.shape[0] == 0:
            continue
        if i == 0 or len(joint_dict) == 0:
            # No people yet: every surviving candidate starts a new person.
            for tag, joint in zip(tags, joints):
                key = tag[0]
                joint_dict.setdefault(key, np.copy(default_))[idx] = joint
                tag_dict[key] = [tag]
        else:
            # Match candidates against the mean tag of each existing person.
            grouped_keys = list(joint_dict.keys())[:params.max_num_people]
            grouped_tags = [np.mean(tag_dict[i], axis=0) for i in grouped_keys]
            if params.ignore_too_much \
                    and len(grouped_keys) == params.max_num_people:
                continue
            diff = joints[:, None, 3:] - np.array(grouped_tags)[None, :, :]
            diff_normed = np.linalg.norm(diff, ord=2, axis=2)
            diff_saved = np.copy(diff_normed)
            if params.use_detection_val:
                # Break tag-distance ties in favour of high-score candidates.
                diff_normed = np.round(diff_normed) * 100 - joints[:, 2:3]
            num_added = diff.shape[0]
            num_grouped = diff.shape[1]
            if num_added > num_grouped:
                # Pad the cost matrix with a huge constant so extra candidates
                # can only be assigned to padded columns as a last resort.
                diff_normed = np.concatenate(
                    (
                        diff_normed,
                        np.zeros((num_added, num_added-num_grouped))+1e10
                    ),
                    axis=1
                )
            pairs = py_max_match(diff_normed)
            for row, col in pairs:
                if (
                    row < num_added
                    and col < num_grouped
                    and diff_saved[row][col] < params.tag_threshold
                ):
                    # Close enough in tag space: join the existing person.
                    key = grouped_keys[col]
                    joint_dict[key][idx] = joints[row]
                    tag_dict[key].append(tags[row])
                else:
                    # Otherwise start a new person keyed by this tag value.
                    key = tags[row][0]
                    joint_dict.setdefault(key, np.copy(default_))[idx] = \
                        joints[row]
                    tag_dict[key] = [tags[row]]
    ans = np.array([joint_dict[i] for i in joint_dict]).astype(np.float32)
    return ans
class Params(object):
    """Grouping hyper-parameters read from a nested config dictionary."""

    def __init__(self, cfg):
        dataset_cfg = cfg['DATASET']
        test_cfg = cfg['TEST']

        self.num_joints = dataset_cfg['NUM_JOINTS']
        self.max_num_people = dataset_cfg['MAX_NUM_PEOPLE']
        self.detection_threshold = test_cfg['DETECTION_THRESHOLD']
        self.tag_threshold = test_cfg['TAG_THRESHOLD']
        self.use_detection_val = test_cfg['USE_DETECTION_VAL']
        self.ignore_too_much = test_cfg['IGNORE_TOO_MUCH']

        with_center = dataset_cfg['WITH_CENTER']
        ignore_center = test_cfg['IGNORE_CENTER']
        # When a center joint exists but is ignored at test time, drop it
        # from the joint count.
        if with_center and ignore_center:
            self.num_joints -= 1
        # Joint processing order (1-based in the literature, converted to
        # 0-based indices); the center joint (18) goes first when kept.
        if with_center and not ignore_center:
            order = [18, 1, 2, 3, 4, 5, 6, 7, 12, 13, 8, 9, 10, 11, 14, 15, 16, 17]
        else:
            order = [1, 2, 3, 4, 5, 6, 7, 12, 13, 8, 9, 10, 11, 14, 15, 16, 17]
        self.joint_order = [j - 1 for j in order]
class HeatmapParser(object):
    """Decode bottom-up network output (heatmaps + associative-embedding
    tags) into per-person keypoint predictions."""

    def __init__(self, cfg):
        # Grouping hyper-parameters come from the shared config.
        self.params = Params(cfg)
        self.tag_per_joint = cfg['MODEL']['TAG_PER_JOINT']
        # Max-pool used for heatmap NMS; stride 1 keeps the spatial size.
        self.pool = torch.nn.MaxPool2d(
            cfg['TEST']['NMS_KERNEL'], 1, cfg['TEST']['NMS_PADDING']
        )

    def nms(self, det):
        """Keep only local maxima of the heatmaps; zero everything else."""
        maxm = self.pool(det)
        maxm = torch.eq(maxm, det).float()
        det = det * maxm
        return det

    def match(self, tag_k, loc_k, val_k):
        """Group the top-k detections into people, one call per image."""
        match = lambda x: match_by_tag(x, self.params)
        return list(map(match, zip(tag_k, loc_k, val_k)))

    def top_k(self, det, tag):
        """Return top-k peak values, (x, y) locations and tags per joint.

        det: heatmap tensor of shape (num_images, num_joints, h, w).
        tag: tagmap tensor; reshaped to (N, J, h*w, tag_dim) below.
        Returns a dict of numpy arrays 'tag_k', 'loc_k', 'val_k'.
        """
        # det = torch.Tensor(det, requires_grad=False)
        # tag = torch.Tensor(tag, requires_grad=False)
        det = self.nms(det)
        num_images = det.size(0)
        num_joints = det.size(1)
        h = det.size(2)
        w = det.size(3)
        det = det.view(num_images, num_joints, -1)
        val_k, ind = det.topk(self.params.max_num_people, dim=2)
        tag = tag.view(tag.size(0), tag.size(1), w*h, -1)
        if not self.tag_per_joint:
            # A single shared tagmap: replicate it across all joints.
            tag = tag.expand(-1, self.params.num_joints, -1, -1)
        # Gather the tag value(s) at every top-k heatmap location.
        tag_k = torch.stack(
            [
                torch.gather(tag[:, :, :, i], 2, ind)
                for i in range(tag.size(3))
            ],
            dim=3
        )
        # Convert flat indices back to (x, y) pixel coordinates.
        x = ind % w
        y = (ind / w).long()
        ind_k = torch.stack((x, y), dim=3)
        ans = {
            'tag_k': tag_k.cpu().numpy(),
            'loc_k': ind_k.cpu().numpy(),
            'val_k': val_k.cpu().numpy()
        }
        return ans

    def adjust(self, ans, det):
        """Shift each detected keypoint a quarter pixel towards the larger
        neighbouring heatmap value, then add the half-pixel center offset.

        NOTE(review): coordinates are unpacked as (y, x) and the heatmap is
        indexed as tmp[xx, yy] — this follows the original repo's swapped
        convention; confirm against how `ans` is consumed downstream.
        """
        for batch_id, people in enumerate(ans):
            for people_id, i in enumerate(people):
                for joint_id, joint in enumerate(i):
                    if joint[2] > 0:
                        y, x = joint[0:2]
                        xx, yy = int(x), int(y)
                        #print(batch_id, joint_id, det[batch_id].shape)
                        tmp = det[batch_id][joint_id]
                        if tmp[xx, min(yy+1, tmp.shape[1]-1)] > tmp[xx, max(yy-1, 0)]:
                            y += 0.25
                        else:
                            y -= 0.25
                        if tmp[min(xx+1, tmp.shape[0]-1), yy] > tmp[max(0, xx-1), yy]:
                            x += 0.25
                        else:
                            x -= 0.25
                        ans[batch_id][people_id, joint_id, 0:2] = (y+0.5, x+0.5)
        return ans

    def refine(self, det, tag, keypoints):
        """
        Given initial keypoint predictions, we identify missing joints
        :param det: numpy.ndarray of size (17, 128, 128)
        :param tag: numpy.ndarray of size (17, 128, 128) if not flip
        :param keypoints: numpy.ndarray of size (17, 4) if not flip, last dim is (x, y, det score, tag score)
        :return: keypoints with previously-missing joints filled in
        """
        if len(tag.shape) == 3:
            # tag shape: (17, 128, 128, 1)
            tag = tag[:, :, :, None]
        tags = []
        for i in range(keypoints.shape[0]):
            if keypoints[i, 2] > 0:
                # save tag value of detected keypoint
                x, y = keypoints[i][:2].astype(np.int32)
                tags.append(tag[i, y, x])
        # mean tag of current detected people
        prev_tag = np.mean(tags, axis=0)
        ans = []
        for i in range(keypoints.shape[0]):
            # score of joints i at all position
            tmp = det[i, :, :]
            # distance of all tag values with mean tag of current detected people
            tt = (((tag[i, :, :] - prev_tag[None, None, :]) ** 2).sum(axis=2) ** 0.5)
            # penalize positions whose tag is far from this person's mean tag
            tmp2 = tmp - np.round(tt)
            # find maximum position
            y, x = np.unravel_index(np.argmax(tmp2), tmp.shape)
            xx = x
            yy = y
            # detection score at maximum position
            val = tmp[y, x]
            # offset by 0.5
            x += 0.5
            y += 0.5
            # add a quarter offset
            if tmp[yy, min(xx + 1, tmp.shape[1] - 1)] > tmp[yy, max(xx - 1, 0)]:
                x += 0.25
            else:
                x -= 0.25
            if tmp[min(yy + 1, tmp.shape[0] - 1), xx] > tmp[max(0, yy - 1), xx]:
                y += 0.25
            else:
                y -= 0.25
            ans.append((x, y, val))
        ans = np.array(ans)
        if ans is not None:
            for i in range(det.shape[0]):
                # add keypoint if it is not detected
                if ans[i, 2] > 0 and keypoints[i, 2] == 0:
                    # if ans[i, 2] > 0.01 and keypoints[i, 2] == 0:
                    keypoints[i, :2] = ans[i, :2]
                    keypoints[i, 2] = ans[i, 2]
        return keypoints

    def parse(self, det, tag, adjust=True, refine=True):
        """Full decode pipeline: top-k -> tag grouping -> optional sub-pixel
        adjust and missing-joint refinement.

        Returns (ans, scores) where scores is the mean joint score of each
        detected person in the first image.
        """
        ans = self.match(**self.top_k(det, tag))
        if adjust:
            ans = self.adjust(ans, det)
        scores = [i[:, 2].mean() for i in ans[0]]
        if refine:
            ans = ans[0]
            # for every detected person
            for i in range(len(ans)):
                det_numpy = det[0].cpu().numpy()
                tag_numpy = tag[0].cpu().numpy()
                if not self.tag_per_joint:
                    # Shared tagmap: replicate across joints for refine().
                    tag_numpy = np.tile(
                        tag_numpy, (self.params.num_joints, 1, 1, 1)
                    )
                ans[i] = self.refine(det_numpy, tag_numpy, ans[i])
            ans = [ans]
        return ans, scores
| [
"numpy.mean",
"numpy.copy",
"numpy.tile",
"torch.stack",
"numpy.argmax",
"torch.eq",
"numpy.array",
"numpy.zeros",
"torch.nn.MaxPool2d",
"munkres.Munkres",
"numpy.concatenate",
"numpy.linalg.norm",
"torch.gather",
"numpy.round"
] | [((617, 626), 'munkres.Munkres', 'Munkres', ([], {}), '()\n', (624, 626), False, 'from munkres import Munkres\n'), ((863, 912), 'numpy.zeros', 'np.zeros', (['(params.num_joints, 3 + tag_k.shape[2])'], {}), '((params.num_joints, 3 + tag_k.shape[2]))\n', (871, 912), True, 'import numpy as np\n'), ((1071, 1129), 'numpy.concatenate', 'np.concatenate', (['(loc_k[idx], val_k[idx, :, None], tags)', '(1)'], {}), '((loc_k[idx], val_k[idx, :, None], tags), 1)\n', (1085, 1129), True, 'import numpy as np\n'), ((4332, 4408), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (["cfg['TEST']['NMS_KERNEL']", '(1)', "cfg['TEST']['NMS_PADDING']"], {}), "(cfg['TEST']['NMS_KERNEL'], 1, cfg['TEST']['NMS_PADDING'])\n", (4350, 4408), False, 'import torch\n'), ((5541, 5567), 'torch.stack', 'torch.stack', (['(x, y)'], {'dim': '(3)'}), '((x, y), dim=3)\n', (5552, 5567), False, 'import torch\n'), ((7488, 7509), 'numpy.mean', 'np.mean', (['tags'], {'axis': '(0)'}), '(tags, axis=0)\n', (7495, 7509), True, 'import numpy as np\n'), ((8534, 8547), 'numpy.array', 'np.array', (['ans'], {}), '(ans)\n', (8542, 8547), True, 'import numpy as np\n'), ((665, 678), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (673, 678), True, 'import numpy as np\n'), ((1956, 1991), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {'ord': '(2)', 'axis': '(2)'}), '(diff, ord=2, axis=2)\n', (1970, 1991), True, 'import numpy as np\n'), ((2017, 2037), 'numpy.copy', 'np.copy', (['diff_normed'], {}), '(diff_normed)\n', (2024, 2037), True, 'import numpy as np\n'), ((3172, 3217), 'numpy.array', 'np.array', (['[joint_dict[i] for i in joint_dict]'], {}), '([joint_dict[i] for i in joint_dict])\n', (3180, 3217), True, 'import numpy as np\n'), ((1672, 1700), 'numpy.mean', 'np.mean', (['tag_dict[i]'], {'axis': '(0)'}), '(tag_dict[i], axis=0)\n', (1679, 1700), True, 'import numpy as np\n'), ((4501, 4520), 'torch.eq', 'torch.eq', (['maxm', 'det'], {}), '(maxm, det)\n', (4509, 4520), False, 'import torch\n'), ((5349, 5386), 
'torch.gather', 'torch.gather', (['tag[:, :, :, i]', '(2)', 'ind'], {}), '(tag[:, :, :, i], 2, ind)\n', (5361, 5386), False, 'import torch\n'), ((7844, 7856), 'numpy.round', 'np.round', (['tt'], {}), '(tt)\n', (7852, 7856), True, 'import numpy as np\n'), ((7930, 7945), 'numpy.argmax', 'np.argmax', (['tmp2'], {}), '(tmp2)\n', (7939, 7945), True, 'import numpy as np\n'), ((1895, 1917), 'numpy.array', 'np.array', (['grouped_tags'], {}), '(grouped_tags)\n', (1903, 1917), True, 'import numpy as np\n'), ((9433, 9486), 'numpy.tile', 'np.tile', (['tag_numpy', '(self.params.num_joints, 1, 1, 1)'], {}), '(tag_numpy, (self.params.num_joints, 1, 1, 1))\n', (9440, 9486), True, 'import numpy as np\n'), ((1485, 1502), 'numpy.copy', 'np.copy', (['default_'], {}), '(default_)\n', (1492, 1502), True, 'import numpy as np\n'), ((2110, 2131), 'numpy.round', 'np.round', (['diff_normed'], {}), '(diff_normed)\n', (2118, 2131), True, 'import numpy as np\n'), ((2404, 2450), 'numpy.zeros', 'np.zeros', (['(num_added, num_added - num_grouped)'], {}), '((num_added, num_added - num_grouped))\n', (2412, 2450), True, 'import numpy as np\n'), ((3049, 3066), 'numpy.copy', 'np.copy', (['default_'], {}), '(default_)\n', (3056, 3066), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from FEM.PlaneStress import PlaneStress
from FEM.Mesh.Geometry import Geometry
# 11.7.1 Ed 3
# Plane-stress FEM example: a b x h rectangular plate of thickness t,
# meshed with two linear triangles ('T1V'), constrained on segment 3 and
# loaded on segment 1, then solved and post-processed.
b = 120
h = 160
t = 0.036
E = 30*10**(6)
v = 0.25
# Node coordinates of the four plate corners.
gdls = [[0, 0], [b, 0], [0, h], [b, h]]
elemento1 = [0, 1, 3]
elemento2 = [0, 3, 2]
dicc = [elemento1, elemento2]
tipos = ['T1V', 'T1V']
# Boundary segments given as pairs of node indices.
segmentos = [[0, 1], [1, 3], [3, 2], [2, 0]]
geometria = Geometry(dicc, gdls, tipos, nvn=2, segments=segmentos)
# Essential boundary conditions on segment 3 — presumably fixing both
# displacement components; confirm against FEM.Mesh.Geometry.cbFromSegment.
cbe = geometria.cbFromSegment(3, 0, 1)
cbe += geometria.cbFromSegment(3, 0, 2)
geometria.cbe = cbe
# Constant load fx=10, fy=0 along segment 1.
geometria.loadOnSegment(1, fx=lambda s: 10, fy=lambda s: 0)
geometria.show()
plt.show()
O = PlaneStress(geometria, E, v, t)
O.elementMatrices()
O.ensembling()
O.borderConditions()
O.solveES()
O.postProcess()
# plt.show()
# Stress evaluated at the interior point (60, 80).
print(O.giveStressPoint(np.array([[60], [80]])))
| [
"numpy.array",
"FEM.PlaneStress.PlaneStress",
"matplotlib.pyplot.show",
"FEM.Mesh.Geometry.Geometry"
] | [((389, 443), 'FEM.Mesh.Geometry.Geometry', 'Geometry', (['dicc', 'gdls', 'tipos'], {'nvn': '(2)', 'segments': 'segmentos'}), '(dicc, gdls, tipos, nvn=2, segments=segmentos)\n', (397, 443), False, 'from FEM.Mesh.Geometry import Geometry\n'), ((620, 630), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (628, 630), True, 'import matplotlib.pyplot as plt\n'), ((635, 666), 'FEM.PlaneStress.PlaneStress', 'PlaneStress', (['geometria', 'E', 'v', 't'], {}), '(geometria, E, v, t)\n', (646, 666), False, 'from FEM.PlaneStress import PlaneStress\n'), ((789, 811), 'numpy.array', 'np.array', (['[[60], [80]]'], {}), '([[60], [80]])\n', (797, 811), True, 'import numpy as np\n')] |
# load libraries
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
# Create feature matrix with categorical feature
# (column 0 is the categorical class value, columns 1-2 are numeric features)
x = np.array([[0, 2.10, 1.45],
              [1, 1.18, 1.33],
              [0, 1.22, 1.27],
              [1, -0.21, -1.19]])
# Create feature matrix with missing values in the categorical feature
X_with_nan = np.array([[np.nan, 0.87, 1.31],
                      [np.nan, -0.67, -0.22]])
# train KNN learner on (numeric features -> class) to impute the class
clf = KNeighborsClassifier(3, weights='distance')
trained_model = clf.fit(x[:,1:], x[:,0])
# predict missing values class
imputed_values = trained_model.predict(X_with_nan[:,1:])
# join column of predicted class with their other features
x_with_imputed = np.hstack((imputed_values.reshape(-1,1), X_with_nan[:,1:]))
# join the two feature matrices
# NOTE(review): the result is neither assigned nor printed; in a notebook
# the last expression is displayed, as a script this value is discarded.
np.vstack((x_with_imputed, x))
| [
"numpy.array",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.vstack"
] | [((141, 220), 'numpy.array', 'np.array', (['[[0, 2.1, 1.45], [1, 1.18, 1.33], [0, 1.22, 1.27], [1, -0.21, -1.19]]'], {}), '([[0, 2.1, 1.45], [1, 1.18, 1.33], [0, 1.22, 1.27], [1, -0.21, -1.19]])\n', (149, 220), True, 'import numpy as np\n'), ((349, 405), 'numpy.array', 'np.array', (['[[np.nan, 0.87, 1.31], [np.nan, -0.67, -0.22]]'], {}), '([[np.nan, 0.87, 1.31], [np.nan, -0.67, -0.22]])\n', (357, 405), True, 'import numpy as np\n'), ((456, 499), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (['(3)'], {'weights': '"""distance"""'}), "(3, weights='distance')\n", (476, 499), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((800, 830), 'numpy.vstack', 'np.vstack', (['(x_with_imputed, x)'], {}), '((x_with_imputed, x))\n', (809, 830), True, 'import numpy as np\n')] |
import config
import models
import tensorflow as tf
import numpy as np
import sys
#Dataset to run on
dataset = sys.argv[1].upper()
#Train TransR based on pretrained TransE results.
#++++++++++++++TransE++++++++++++++++++++
# Stage 1: train plain TransE to obtain entity/relation embeddings that
# will warm-start TransR below.
con = config.Config()
con.set_in_path("./benchmarks/{}/".format(dataset))
con.set_work_threads(16)
con.set_train_times(500)
con.set_nbatches(100)
con.set_alpha(0.001)
con.set_bern(0)
con.set_dimension(100)
con.set_margin(1)
con.set_ent_neg_rate(1)
con.set_rel_neg_rate(0)
con.set_opt_method("SGD")
con.init()
con.set_model(models.TransE)
con.run()
# Export the learned TransE parameters as numpy arrays.
parameters = con.get_parameters("numpy")
#++++++++++++++TransR++++++++++++++++++++
# Stage 2: train TransR, initialized from the TransE parameters.
conR = config.Config()
#Input training files from benchmarks/{dataset}/ folder.
conR.set_in_path("./benchmarks/{}/".format(dataset))
#True: Input test files from the same folder.
#We do not need link prediction here
#conR.set_test_link_prediction(True)
conR.set_test_triple_classification(True)
conR.set_work_threads(16)
conR.set_train_times(500)
conR.set_nbatches(100)
conR.set_alpha(0.001)
conR.set_bern(0)
conR.set_dimension(100)
conR.set_margin(1)
conR.set_ent_neg_rate(1)
conR.set_rel_neg_rate(0)
conR.set_opt_method("SGD")
#Models will be exported via tf.Saver() automatically.
conR.set_export_files("./res/model.vec.tf", 0)
#Model parameters will be exported to json files automatically.
conR.set_out_files("./res/embedding.vec.json")
#Initialize experimental settings.
conR.init()
#Load pretrained TransE results.
conR.set_model(models.TransR)
# Initialize every relation's projection matrix to the identity so TransR
# starts exactly equivalent to the pretrained TransE model.
parameters["transfer_matrix"] = np.array([(np.identity(100).reshape((100*100))) for i in range(conR.get_rel_total())])
conR.set_parameters(parameters)
#Train the model.
conR.run()
#To test models after training needs "set_test_flag(True)".
conR.test()
| [
"numpy.identity",
"config.Config"
] | [((231, 246), 'config.Config', 'config.Config', ([], {}), '()\n', (244, 246), False, 'import config\n'), ((665, 680), 'config.Config', 'config.Config', ([], {}), '()\n', (678, 680), False, 'import config\n'), ((1555, 1571), 'numpy.identity', 'np.identity', (['(100)'], {}), '(100)\n', (1566, 1571), True, 'import numpy as np\n')] |
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#pip install tensorflow==2.3.1
#pip install tensorflow-quantum
import tensorflow as tf
import tensorflow_quantum as tfq
import cirq
import sympy
import numpy as np
# visualization tools#%matplotlib inline
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
# Demo: append a fixed circuit to a batch of circuits with tfq AddCircuit.
qubit = cirq.GridQubit(0, 0)
# Define some circuits.
circuit1 = cirq.Circuit(cirq.X(qubit))
circuit2 = cirq.Circuit(cirq.H(qubit))
# Convert to a tensor.
input_circuit_tensor = tfq.convert_to_tensor([circuit1, circuit2])
# Define a circuit that we want to append
y_circuit = cirq.Circuit(cirq.Y(qubit))
# Instantiate our layer
y_appender = tfq.layers.AddCircuit()
# Run our circuit tensor through the layer and save the output.
output_circuit_tensor = y_appender(input_circuit_tensor, append=y_circuit)
print(tfq.from_tensor(input_circuit_tensor))
print(tfq.from_tensor(output_circuit_tensor))
def generate_data(qubits):
"""Generate training and testing data."""
n_rounds = 20 # Produces n_rounds * n_qubits datapoints.
excitations = []
labels = []
for n in range(n_rounds):
for bit in qubits:
rng = np.random.uniform(-np.pi, np.pi)
excitations.append(cirq.Circuit(cirq.rx(rng)(bit)))
labels.append(1 if (-np.pi / 2) <= rng <= (np.pi / 2) else -1)
split_ind = int(len(excitations) * 0.7)
train_excitations = excitations[:split_ind]
test_excitations = excitations[split_ind:]
train_labels = labels[:split_ind]
test_labels = labels[split_ind:]
return tfq.convert_to_tensor(train_excitations), np.array(train_labels), \
tfq.convert_to_tensor(test_excitations), np.array(test_labels)
# Peek at two generated (circuit, label) training examples.
sample_points, sample_labels, _, __ = generate_data(cirq.GridQubit.rect(1, 4))
print('Input:', tfq.from_tensor(sample_points)[0], 'Output:', sample_labels[0])
print('Input:', tfq.from_tensor(sample_points)[1], 'Output:', sample_labels[1])
def cluster_state_circuit(bits):
    """Return a cluster state on the qubits in `bits`.

    Hadamard on every qubit, then CZ between each neighbouring pair
    around the ring (last qubit wraps to the first).
    """
    ring_pairs = list(zip(bits, bits[1:] + [bits[0]]))
    circuit = cirq.Circuit(cirq.H.on_each(bits))
    for left, right in ring_pairs:
        circuit.append(cirq.CZ(left, right))
    return circuit
# Render a 4-qubit cluster-state prep circuit (display in notebooks).
SVGCircuit(cluster_state_circuit(cirq.GridQubit.rect(1, 4)))
def one_qubit_unitary(bit, symbols):
    """Make a Cirq circuit enacting a rotation of the bloch sphere about the X,
    Y and Z axis, that depends on the values in `symbols`.
    """
    rotations = (cirq.X(bit)**symbols[0],
                 cirq.Y(bit)**symbols[1],
                 cirq.Z(bit)**symbols[2])
    return cirq.Circuit(rotations)
def two_qubit_unitary(bits, symbols):
    """Make a Cirq circuit that creates an arbitrary two qubit unitary.

    Single-qubit rotations on both qubits, exponentiated ZZ/YY/XX
    interactions, then another pair of single-qubit rotations
    (15 parameters total).
    """
    first, second = bits[0], bits[1]
    circuit = cirq.Circuit()
    circuit += one_qubit_unitary(first, symbols[0:3])
    circuit += one_qubit_unitary(second, symbols[3:6])
    circuit += [cirq.ZZ(first, second)**symbols[6],
                cirq.YY(first, second)**symbols[7],
                cirq.XX(first, second)**symbols[8]]
    circuit += one_qubit_unitary(first, symbols[9:12])
    circuit += one_qubit_unitary(second, symbols[12:])
    return circuit
def two_qubit_pool(source_qubit, sink_qubit, symbols):
    """Make a Cirq circuit to do a parameterized 'pooling' operation, which
    attempts to reduce entanglement down from two qubits to just one.

    Rotate both qubits into a learned basis, CNOT the source onto the
    sink, then undo the sink rotation.
    """
    pooled = cirq.Circuit()
    sink_rotation = one_qubit_unitary(sink_qubit, symbols[0:3])
    source_rotation = one_qubit_unitary(source_qubit, symbols[3:6])
    pooled.append(sink_rotation)
    pooled.append(source_rotation)
    pooled.append(cirq.CNOT(control=source_qubit, target=sink_qubit))
    pooled.append(sink_rotation**-1)
    return pooled
# Render the three building blocks with symbolic parameters.
SVGCircuit(one_qubit_unitary(cirq.GridQubit(0, 0), sympy.symbols('x0:3')))
SVGCircuit(two_qubit_unitary(cirq.GridQubit.rect(1, 2), sympy.symbols('x0:15')))
SVGCircuit(two_qubit_pool(*cirq.GridQubit.rect(1, 2), sympy.symbols('x0:6')))
def quantum_conv_circuit(bits, symbols):
    """Quantum Convolution Layer.

    Applies the same 15-parameter `two_qubit_unitary` to every
    even-indexed pair, then to every odd-indexed pair (wrapping the
    last qubit around to the first).
    """
    circuit = cirq.Circuit()
    for left, right in zip(bits[0::2], bits[1::2]):
        circuit += two_qubit_unitary([left, right], symbols)
    for left, right in zip(bits[1::2], bits[2::2] + [bits[0]]):
        circuit += two_qubit_unitary([left, right], symbols)
    return circuit
# Render one convolution layer over 8 qubits.
SVGCircuit(
    quantum_conv_circuit(cirq.GridQubit.rect(1, 8), sympy.symbols('x0:15')))
def quantum_pool_circuit(source_bits, sink_bits, symbols):
    """A layer that specifies a quantum pooling operation.

    Pools each source qubit onto its paired sink qubit with the shared
    6-parameter `two_qubit_pool` block.
    """
    pooled = cirq.Circuit()
    for src, dst in zip(source_bits, sink_bits):
        pooled += two_qubit_pool(src, dst, symbols)
    return pooled
# Render a pooling layer folding qubits 0-3 onto qubits 4-7.
test_bits = cirq.GridQubit.rect(1, 8)
SVGCircuit(
    quantum_pool_circuit(test_bits[:4], test_bits[4:], sympy.symbols('x0:6')))
def create_model_circuit(qubits):
    """Create sequence of alternating convolution and pooling operators
    which gradually shrink over time: 8 -> 4 -> 2 -> 1 qubits.
    """
    # Cirq uses sympy.Symbols to map learnable variables. TensorFlow Quantum
    # scans incoming circuits and replaces these with TensorFlow variables.
    symbols = sympy.symbols('qconv0:63')
    layers = [
        quantum_conv_circuit(qubits, symbols[0:15]),
        quantum_pool_circuit(qubits[:4], qubits[4:], symbols[15:21]),
        quantum_conv_circuit(qubits[4:], symbols[21:36]),
        quantum_pool_circuit(qubits[4:6], qubits[6:], symbols[36:42]),
        quantum_conv_circuit(qubits[6:], symbols[42:57]),
        quantum_pool_circuit([qubits[6]], [qubits[7]], symbols[57:63]),
    ]
    model_circuit = cirq.Circuit()
    for layer in layers:
        model_circuit += layer
    return model_circuit
# Create our qubits and readout operators in Cirq.
cluster_state_bits = cirq.GridQubit.rect(1, 8)
readout_operators = cirq.Z(cluster_state_bits[-1])
# Build a sequential model enacting the logic in 1.3 of this notebook.
# Here you are making the static cluster state prep as a part of the AddCircuit and the
# "quantum datapoints" are coming in the form of excitation
excitation_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state = tfq.layers.AddCircuit()(
    excitation_input, prepend=cluster_state_circuit(cluster_state_bits))
# PQC layer: the trainable QCNN circuit, read out on the last qubit.
quantum_model = tfq.layers.PQC(create_model_circuit(cluster_state_bits),
                              readout_operators)(cluster_state)
qcnn_model = tf.keras.Model(inputs=[excitation_input], outputs=[quantum_model])
# Show the keras plot of the model
# Could not get this working
#tf.keras.utils.plot_model(qcnn_model,
#                          show_shapes=True,
#                          show_layer_names=False,
#                          dpi=70)
# Generate some training data.
train_excitations, train_labels, test_excitations, test_labels = generate_data(
    cluster_state_bits)
# Custom accuracy metric.
@tf.function
def custom_accuracy(y_true, y_pred):
    """Accuracy for +/-1 labels: threshold predictions at 0, then compare."""
    y_true = tf.squeeze(y_true)
    # Map raw expectation values to hard +/-1 labels. NOTE(review): the
    # Python conditional inside map_fn relies on autograph/eager handling
    # of tensor booleans — confirm it traces correctly under tf.function.
    y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)
    return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))
# Train the pure-quantum QCNN and plot its loss curves.
qcnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
                   loss=tf.losses.mse,
                   metrics=[custom_accuracy])
history = qcnn_model.fit(x=train_excitations,
                         y=train_labels,
                         batch_size=16,
                         epochs=25,
                         verbose=1,
                         validation_data=(test_excitations, test_labels))
plt.plot(history.history['loss'][1:], label='Training')
plt.plot(history.history['val_loss'][1:], label='Validation')
plt.title('Training a Quantum CNN to Detect Excited Cluster States')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
#plt.show()
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")
# 1-local operators to read out
readouts = [cirq.Z(bit) for bit in cluster_state_bits[4:]]
def multi_readout_model_circuit(qubits):
    """Make a model circuit with less quantum pool and conv operations:
    a single conv layer followed by a single pool layer.
    """
    symbols = sympy.symbols('qconv0:21')
    circuit = cirq.Circuit()
    circuit += quantum_conv_circuit(qubits, symbols[0:15])
    circuit += quantum_pool_circuit(qubits[:4], qubits[4:], symbols[15:21])
    return circuit
# Build a model enacting the logic in 2.1 of this notebook.
# Hybrid model: shallow quantum circuit + small classical dense head.
excitation_input_dual = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_dual = tfq.layers.AddCircuit()(
    excitation_input_dual, prepend=cluster_state_circuit(cluster_state_bits))
quantum_model_dual = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_dual)
d1_dual = tf.keras.layers.Dense(8)(quantum_model_dual)
d2_dual = tf.keras.layers.Dense(1)(d1_dual)
hybrid_model = tf.keras.Model(inputs=[excitation_input_dual], outputs=[d2_dual])
# Display the model architecture
#tf.keras.utils.plot_model(hybrid_model,
#                          show_shapes=True,
#                          show_layer_names=False,
#                          dpi=70)
hybrid_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
                     loss=tf.losses.mse,
                     metrics=[custom_accuracy])
hybrid_history = hybrid_model.fit(x=train_excitations,
                                  y=train_labels,
                                  batch_size=16,
                                  epochs=25,
                                  verbose=1,
                                  validation_data=(test_excitations,
                                                   test_labels))
# Compare pure-quantum vs hybrid validation accuracy.
plt.plot(history.history['val_custom_accuracy'], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'], label='Hybrid CNN')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
#plt.show()
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")
# Hybrid model with three independently-trained quantum filters.
excitation_input_multi = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
cluster_state_multi = tfq.layers.AddCircuit()(
    excitation_input_multi, prepend=cluster_state_circuit(cluster_state_bits))
# apply 3 different filters and measure expectation values
quantum_model_multi1 = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_multi)
quantum_model_multi2 = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_multi)
quantum_model_multi3 = tfq.layers.PQC(
    multi_readout_model_circuit(cluster_state_bits),
    readouts)(cluster_state_multi)
# concatenate outputs and feed into a small classical NN
concat_out = tf.keras.layers.concatenate(
    [quantum_model_multi1, quantum_model_multi2, quantum_model_multi3])
dense_1 = tf.keras.layers.Dense(8)(concat_out)
dense_2 = tf.keras.layers.Dense(1)(dense_1)
multi_qconv_model = tf.keras.Model(inputs=[excitation_input_multi],
                                  outputs=[dense_2])
# Display the model architecture
#tf.keras.utils.plot_model(multi_qconv_model,
#                          show_shapes=True,
#                          show_layer_names=True,
#                          dpi=70)
multi_qconv_model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),
    loss=tf.losses.mse,
    metrics=[custom_accuracy])
multi_qconv_history = multi_qconv_model.fit(x=train_excitations,
                                            y=train_labels,
                                            batch_size=16,
                                            epochs=25,
                                            verbose=1,
                                            validation_data=(test_excitations,
                                                             test_labels))
# Final comparison of all three architectures.
plt.plot(history.history['val_custom_accuracy'][:25], label='QCNN')
plt.plot(hybrid_history.history['val_custom_accuracy'][:25], label='Hybrid CNN')
plt.plot(multi_qconv_history.history['val_custom_accuracy'][:25],
         label='Hybrid CNN \n Multiple Quantum Filters')
plt.title('Quantum vs Hybrid CNN performance')
plt.xlabel('Epochs')
plt.legend()
plt.ylabel('Validation Accuracy')
#plt.show()
plt.draw()
plt.pause(0.001)
input("Open Ports --> Open Preview or Browser --> push enter to continue")
| [
"matplotlib.pyplot.ylabel",
"cirq.GridQubit",
"tensorflow_quantum.convert_to_tensor",
"cirq.Circuit",
"numpy.array",
"cirq.H.on_each",
"tensorflow.keras.layers.Dense",
"cirq.YY",
"cirq.CNOT",
"cirq.ZZ",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"cirq.XX",
"cirq.rx",
"matplotl... | [((775, 798), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (789, 798), False, 'import matplotlib\n'), ((881, 901), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (895, 901), False, 'import cirq\n'), ((1052, 1095), 'tensorflow_quantum.convert_to_tensor', 'tfq.convert_to_tensor', (['[circuit1, circuit2]'], {}), '([circuit1, circuit2])\n', (1073, 1095), True, 'import tensorflow_quantum as tfq\n'), ((1217, 1240), 'tensorflow_quantum.layers.AddCircuit', 'tfq.layers.AddCircuit', ([], {}), '()\n', (1238, 1240), True, 'import tensorflow_quantum as tfq\n'), ((5575, 5600), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(8)'], {}), '(1, 8)\n', (5594, 5600), False, 'import cirq\n'), ((6750, 6775), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(8)'], {}), '(1, 8)\n', (6769, 6775), False, 'import cirq\n'), ((6796, 6826), 'cirq.Z', 'cirq.Z', (['cluster_state_bits[-1]'], {}), '(cluster_state_bits[-1])\n', (6802, 6826), False, 'import cirq\n'), ((7066, 7114), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '()', 'dtype': 'tf.dtypes.string'}), '(shape=(), dtype=tf.dtypes.string)\n', (7080, 7114), True, 'import tensorflow as tf\n'), ((7382, 7448), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[excitation_input]', 'outputs': '[quantum_model]'}), '(inputs=[excitation_input], outputs=[quantum_model])\n', (7396, 7448), True, 'import tensorflow as tf\n'), ((8505, 8560), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss'][1:]"], {'label': '"""Training"""'}), "(history.history['loss'][1:], label='Training')\n", (8513, 8560), True, 'import matplotlib.pyplot as plt\n'), ((8561, 8622), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss'][1:]"], {'label': '"""Validation"""'}), "(history.history['val_loss'][1:], label='Validation')\n", (8569, 8622), True, 'import matplotlib.pyplot as plt\n'), ((8623, 8691), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Training a Quantum CNN to Detect Excited Cluster States"""'], {}), "('Training a Quantum CNN to Detect Excited Cluster States')\n", (8632, 8691), True, 'import matplotlib.pyplot as plt\n'), ((8692, 8712), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (8702, 8712), True, 'import matplotlib.pyplot as plt\n'), ((8713, 8731), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (8723, 8731), True, 'import matplotlib.pyplot as plt\n'), ((8732, 8744), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8742, 8744), True, 'import matplotlib.pyplot as plt\n'), ((8757, 8767), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (8765, 8767), True, 'import matplotlib.pyplot as plt\n'), ((8768, 8784), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (8777, 8784), True, 'import matplotlib.pyplot as plt\n'), ((9446, 9494), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '()', 'dtype': 'tf.dtypes.string'}), '(shape=(), dtype=tf.dtypes.string)\n', (9460, 9494), True, 'import tensorflow as tf\n'), ((9862, 9927), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[excitation_input_dual]', 'outputs': '[d2_dual]'}), '(inputs=[excitation_input_dual], outputs=[d2_dual])\n', (9876, 9927), True, 'import tensorflow as tf\n'), ((10681, 10743), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_custom_accuracy']"], {'label': '"""QCNN"""'}), "(history.history['val_custom_accuracy'], label='QCNN')\n", (10689, 10743), True, 'import matplotlib.pyplot as plt\n'), ((10744, 10819), 'matplotlib.pyplot.plot', 'plt.plot', (["hybrid_history.history['val_custom_accuracy']"], {'label': '"""Hybrid CNN"""'}), "(hybrid_history.history['val_custom_accuracy'], label='Hybrid CNN')\n", (10752, 10819), True, 'import matplotlib.pyplot as plt\n'), ((10820, 10866), 'matplotlib.pyplot.title', 'plt.title', (['"""Quantum vs Hybrid CNN performance"""'], {}), "('Quantum vs Hybrid CNN 
performance')\n", (10829, 10866), True, 'import matplotlib.pyplot as plt\n'), ((10867, 10887), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (10877, 10887), True, 'import matplotlib.pyplot as plt\n'), ((10888, 10900), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10898, 10900), True, 'import matplotlib.pyplot as plt\n'), ((10901, 10934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation Accuracy"""'], {}), "('Validation Accuracy')\n", (10911, 10934), True, 'import matplotlib.pyplot as plt\n'), ((10947, 10957), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (10955, 10957), True, 'import matplotlib.pyplot as plt\n'), ((10958, 10974), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (10967, 10974), True, 'import matplotlib.pyplot as plt\n'), ((11076, 11124), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '()', 'dtype': 'tf.dtypes.string'}), '(shape=(), dtype=tf.dtypes.string)\n', (11090, 11124), True, 'import tensorflow as tf\n'), ((11767, 11866), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[quantum_model_multi1, quantum_model_multi2, quantum_model_multi3]'], {}), '([quantum_model_multi1, quantum_model_multi2,\n quantum_model_multi3])\n', (11794, 11866), True, 'import tensorflow as tf\n'), ((11982, 12048), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[excitation_input_multi]', 'outputs': '[dense_2]'}), '(inputs=[excitation_input_multi], outputs=[dense_2])\n', (11996, 12048), True, 'import tensorflow as tf\n'), ((12887, 12954), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_custom_accuracy'][:25]"], {'label': '"""QCNN"""'}), "(history.history['val_custom_accuracy'][:25], label='QCNN')\n", (12895, 12954), True, 'import matplotlib.pyplot as plt\n'), ((12955, 13040), 'matplotlib.pyplot.plot', 'plt.plot', (["hybrid_history.history['val_custom_accuracy'][:25]"], {'label': '"""Hybrid CNN"""'}), 
"(hybrid_history.history['val_custom_accuracy'][:25], label='Hybrid CNN'\n )\n", (12963, 13040), True, 'import matplotlib.pyplot as plt\n'), ((13036, 13157), 'matplotlib.pyplot.plot', 'plt.plot', (["multi_qconv_history.history['val_custom_accuracy'][:25]"], {'label': '"""Hybrid CNN \n Multiple Quantum Filters"""'}), '(multi_qconv_history.history[\'val_custom_accuracy\'][:25], label=\n """Hybrid CNN \n Multiple Quantum Filters""")\n', (13044, 13157), True, 'import matplotlib.pyplot as plt\n'), ((13159, 13205), 'matplotlib.pyplot.title', 'plt.title', (['"""Quantum vs Hybrid CNN performance"""'], {}), "('Quantum vs Hybrid CNN performance')\n", (13168, 13205), True, 'import matplotlib.pyplot as plt\n'), ((13206, 13226), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (13216, 13226), True, 'import matplotlib.pyplot as plt\n'), ((13227, 13239), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13237, 13239), True, 'import matplotlib.pyplot as plt\n'), ((13240, 13273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation Accuracy"""'], {}), "('Validation Accuracy')\n", (13250, 13273), True, 'import matplotlib.pyplot as plt\n'), ((13286, 13296), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (13294, 13296), True, 'import matplotlib.pyplot as plt\n'), ((13297, 13313), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (13306, 13313), True, 'import matplotlib.pyplot as plt\n'), ((951, 964), 'cirq.X', 'cirq.X', (['qubit'], {}), '(qubit)\n', (957, 964), False, 'import cirq\n'), ((990, 1003), 'cirq.H', 'cirq.H', (['qubit'], {}), '(qubit)\n', (996, 1003), False, 'import cirq\n'), ((1164, 1177), 'cirq.Y', 'cirq.Y', (['qubit'], {}), '(qubit)\n', (1170, 1177), False, 'import cirq\n'), ((1388, 1425), 'tensorflow_quantum.from_tensor', 'tfq.from_tensor', (['input_circuit_tensor'], {}), '(input_circuit_tensor)\n', (1403, 1425), True, 'import tensorflow_quantum as tfq\n'), ((1434, 1472), 
'tensorflow_quantum.from_tensor', 'tfq.from_tensor', (['output_circuit_tensor'], {}), '(output_circuit_tensor)\n', (1449, 1472), True, 'import tensorflow_quantum as tfq\n'), ((2314, 2339), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(4)'], {}), '(1, 4)\n', (2333, 2339), False, 'import cirq\n'), ((2607, 2621), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (2619, 2621), False, 'import cirq\n'), ((3297, 3311), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (3309, 3311), False, 'import cirq\n'), ((3913, 3927), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (3925, 3927), False, 'import cirq\n'), ((4812, 4826), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (4824, 4826), False, 'import cirq\n'), ((5418, 5432), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (5430, 5432), False, 'import cirq\n'), ((5862, 5876), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (5874, 5876), False, 'import cirq\n'), ((5891, 5917), 'sympy.symbols', 'sympy.symbols', (['"""qconv0:63"""'], {}), "('qconv0:63')\n", (5904, 5917), False, 'import sympy\n'), ((7131, 7154), 'tensorflow_quantum.layers.AddCircuit', 'tfq.layers.AddCircuit', ([], {}), '()\n', (7152, 7154), True, 'import tensorflow_quantum as tfq\n'), ((7910, 7928), 'tensorflow.squeeze', 'tf.squeeze', (['y_true'], {}), '(y_true)\n', (7920, 7928), True, 'import tensorflow as tf\n'), ((7942, 7994), 'tensorflow.map_fn', 'tf.map_fn', (['(lambda x: 1.0 if x >= 0 else -1.0)', 'y_pred'], {}), '(lambda x: 1.0 if x >= 0 else -1.0, y_pred)\n', (7951, 7994), True, 'import tensorflow as tf\n'), ((8905, 8916), 'cirq.Z', 'cirq.Z', (['bit'], {}), '(bit)\n', (8911, 8916), False, 'import cirq\n'), ((9090, 9104), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (9102, 9104), False, 'import cirq\n'), ((9119, 9145), 'sympy.symbols', 'sympy.symbols', (['"""qconv0:21"""'], {}), "('qconv0:21')\n", (9132, 9145), False, 'import sympy\n'), ((9517, 9540), 'tensorflow_quantum.layers.AddCircuit', 'tfq.layers.AddCircuit', ([], {}), '()\n', 
(9538, 9540), True, 'import tensorflow_quantum as tfq\n'), ((9756, 9780), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(8)'], {}), '(8)\n', (9777, 9780), True, 'import tensorflow as tf\n'), ((9812, 9836), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (9833, 9836), True, 'import tensorflow as tf\n'), ((11148, 11171), 'tensorflow_quantum.layers.AddCircuit', 'tfq.layers.AddCircuit', ([], {}), '()\n', (11169, 11171), True, 'import tensorflow_quantum as tfq\n'), ((11879, 11903), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(8)'], {}), '(8)\n', (11900, 11903), True, 'import tensorflow as tf\n'), ((11927, 11951), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (11948, 11951), True, 'import tensorflow as tf\n'), ((2122, 2162), 'tensorflow_quantum.convert_to_tensor', 'tfq.convert_to_tensor', (['train_excitations'], {}), '(train_excitations)\n', (2143, 2162), True, 'import tensorflow_quantum as tfq\n'), ((2164, 2186), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (2172, 2186), True, 'import numpy as np\n'), ((2198, 2237), 'tensorflow_quantum.convert_to_tensor', 'tfq.convert_to_tensor', (['test_excitations'], {}), '(test_excitations)\n', (2219, 2237), True, 'import tensorflow_quantum as tfq\n'), ((2239, 2260), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (2247, 2260), True, 'import numpy as np\n'), ((2357, 2387), 'tensorflow_quantum.from_tensor', 'tfq.from_tensor', (['sample_points'], {}), '(sample_points)\n', (2372, 2387), True, 'import tensorflow_quantum as tfq\n'), ((2437, 2467), 'tensorflow_quantum.from_tensor', 'tfq.from_tensor', (['sample_points'], {}), '(sample_points)\n', (2452, 2467), True, 'import tensorflow_quantum as tfq\n'), ((2641, 2661), 'cirq.H.on_each', 'cirq.H.on_each', (['bits'], {}), '(bits)\n', (2655, 2661), False, 'import cirq\n'), ((2831, 2856), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 
'(4)'], {}), '(1, 4)\n', (2850, 2856), False, 'import cirq\n'), ((4188, 4238), 'cirq.CNOT', 'cirq.CNOT', ([], {'control': 'source_qubit', 'target': 'sink_qubit'}), '(control=source_qubit, target=sink_qubit)\n', (4197, 4238), False, 'import cirq\n'), ((4343, 4363), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', '(0)'], {}), '(0, 0)\n', (4357, 4363), False, 'import cirq\n'), ((4365, 4386), 'sympy.symbols', 'sympy.symbols', (['"""x0:3"""'], {}), "('x0:3')\n", (4378, 4386), False, 'import sympy\n'), ((4419, 4444), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(2)'], {}), '(1, 2)\n', (4438, 4444), False, 'import cirq\n'), ((4446, 4468), 'sympy.symbols', 'sympy.symbols', (['"""x0:15"""'], {}), "('x0:15')\n", (4459, 4468), False, 'import sympy\n'), ((4526, 4547), 'sympy.symbols', 'sympy.symbols', (['"""x0:6"""'], {}), "('x0:6')\n", (4539, 4547), False, 'import sympy\n'), ((5130, 5155), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(8)'], {}), '(1, 8)\n', (5149, 5155), False, 'import cirq\n'), ((5157, 5179), 'sympy.symbols', 'sympy.symbols', (['"""x0:15"""'], {}), "('x0:15')\n", (5170, 5179), False, 'import sympy\n'), ((5669, 5690), 'sympy.symbols', 'sympy.symbols', (['"""x0:6"""'], {}), "('x0:6')\n", (5682, 5690), False, 'import sympy\n'), ((8028, 8066), 'tensorflow.keras.backend.equal', 'tf.keras.backend.equal', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8050, 8066), True, 'import tensorflow as tf\n'), ((8099, 8143), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.02)'}), '(learning_rate=0.02)\n', (8123, 8143), True, 'import tensorflow as tf\n'), ((10166, 10210), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.02)'}), '(learning_rate=0.02)\n', (10190, 10210), True, 'import tensorflow as tf\n'), ((12336, 12380), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.02)'}), '(learning_rate=0.02)\n', (12360, 12380), True, 
'import tensorflow as tf\n'), ((1722, 1754), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {}), '(-np.pi, np.pi)\n', (1739, 1754), True, 'import numpy as np\n'), ((2749, 2776), 'cirq.CZ', 'cirq.CZ', (['this_bit', 'next_bit'], {}), '(this_bit, next_bit)\n', (2756, 2776), False, 'import cirq\n'), ((3077, 3088), 'cirq.X', 'cirq.X', (['bit'], {}), '(bit)\n', (3083, 3088), False, 'import cirq\n'), ((3110, 3121), 'cirq.Y', 'cirq.Y', (['bit'], {}), '(bit)\n', (3116, 3121), False, 'import cirq\n'), ((3143, 3154), 'cirq.Z', 'cirq.Z', (['bit'], {}), '(bit)\n', (3149, 3154), False, 'import cirq\n'), ((3440, 3454), 'cirq.ZZ', 'cirq.ZZ', (['*bits'], {}), '(*bits)\n', (3447, 3454), False, 'import cirq\n'), ((3484, 3498), 'cirq.YY', 'cirq.YY', (['*bits'], {}), '(*bits)\n', (3491, 3498), False, 'import cirq\n'), ((3528, 3542), 'cirq.XX', 'cirq.XX', (['*bits'], {}), '(*bits)\n', (3535, 3542), False, 'import cirq\n'), ((4499, 4524), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(2)'], {}), '(1, 2)\n', (4518, 4524), False, 'import cirq\n'), ((1799, 1811), 'cirq.rx', 'cirq.rx', (['rng'], {}), '(rng)\n', (1806, 1811), False, 'import cirq\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import logging
import itertools
import numpy as np
from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned
from allel.model.ndarray import GenotypeArray
from allel.stats.window import windowed_statistic, moving_statistic
from allel.stats.diversity import mean_pairwise_difference, \
mean_pairwise_difference_between
from allel.stats.misc import jackknife
from allel.chunked import get_blen_array
logger = logging.getLogger(__name__)
debug = logger.debug
def weir_cockerham_fst(g, subpops, max_allele=None, chunked=False, blen=None):
    """Compute the variance components from the analyses of variance of
    allele frequencies according to Weir and Cockerham (1984).
    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    subpops : sequence of sequences of ints
        Sample indices for each subpopulation.
    max_allele : int, optional
        The highest allele index to consider.
    chunked : bool, optional
        If True, use a block-wise implementation to avoid loading the entire
        input array into memory.
    blen : int, optional
        Block length to use for chunked implementation.
    Returns
    -------
    a : ndarray, float, shape (n_variants, n_alleles)
        Component of variance between populations.
    b : ndarray, float, shape (n_variants, n_alleles)
        Component of variance between individuals within populations.
    c : ndarray, float, shape (n_variants, n_alleles)
        Component of variance between gametes within individuals.
    Examples
    --------
    Calculate variance components from some genotype data::
        >>> import allel
        >>> g = [[[0, 0], [0, 0], [1, 1], [1, 1]],
        ...      [[0, 1], [0, 1], [0, 1], [0, 1]],
        ...      [[0, 0], [0, 0], [0, 0], [0, 0]],
        ...      [[0, 1], [1, 2], [1, 1], [2, 2]],
        ...      [[0, 0], [1, 1], [0, 1], [-1, -1]]]
        >>> subpops = [[0, 1], [2, 3]]
        >>> a, b, c = allel.stats.weir_cockerham_fst(g, subpops)
        >>> a
        array([[ 0.5  ,  0.5  ,  0.   ],
               [ 0.   ,  0.   ,  0.   ],
               [ 0.   ,  0.   ,  0.   ],
               [ 0.   , -0.125, -0.125],
               [-0.375, -0.375,  0.   ]])
        >>> b
        array([[ 0.        ,  0.        ,  0.        ],
               [-0.25      , -0.25      ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.        ,  0.125     ,  0.25      ],
               [ 0.41666667,  0.41666667,  0.        ]])
        >>> c
        array([[ 0.        ,  0.        ,  0.        ],
               [ 0.5       ,  0.5       ,  0.        ],
               [ 0.        ,  0.        ,  0.        ],
               [ 0.125     ,  0.25      ,  0.125     ],
               [ 0.16666667,  0.16666667,  0.        ]])
    Estimate the parameter theta (a.k.a., Fst) for each variant
    and each allele individually::
        >>> fst = a / (a + b + c)
        >>> fst
        array([[ 1. ,  1. ,  nan],
               [ 0. ,  0. ,  nan],
               [ nan,  nan,  nan],
               [ 0. , -0.5, -0.5],
               [-1.8, -1.8,  nan]])
    Estimate Fst for each variant individually (averaging over alleles)::
        >>> fst = (np.sum(a, axis=1) /
        ...        (np.sum(a, axis=1) + np.sum(b, axis=1) + np.sum(c, axis=1)))
        >>> fst
        array([ 1. ,  0. ,  nan, -0.4, -1.8])
    Estimate Fst averaging over all variants and alleles::
        >>> fst = np.sum(a) / (np.sum(a) + np.sum(b) + np.sum(c))
        >>> fst
        -4.3680905886891398e-17
    Note that estimated Fst values may be negative.
    """
    # check inputs: coerce anything without array-like attributes to a
    # GenotypeArray so downstream code can rely on .shape/.ndim
    if not hasattr(g, 'shape') or not hasattr(g, 'ndim'):
        g = GenotypeArray(g, copy=False)
    if g.ndim != 3:
        raise ValueError('g must have three dimensions')
    if g.shape[2] != 2:
        raise NotImplementedError('only diploid genotypes are supported')
    # determine highest allele index
    if max_allele is None:
        max_allele = g.max()
    if chunked:
        # use a block-wise implementation to bound peak memory: process
        # `blen` variants at a time and write results into preallocated
        # output arrays
        blen = get_blen_array(g, blen)
        n_variants = g.shape[0]
        shape = (n_variants, max_allele + 1)
        a = np.zeros(shape, dtype='f8')
        b = np.zeros(shape, dtype='f8')
        c = np.zeros(shape, dtype='f8')
        for i in range(0, n_variants, blen):
            j = min(n_variants, i+blen)
            gb = g[i:j]
            ab, bb, cb = _weir_cockerham_fst(gb, subpops, max_allele)
            a[i:j] = ab
            b[i:j] = bb
            c[i:j] = cb
    else:
        # compute over the whole array in one pass
        a, b, c = _weir_cockerham_fst(g, subpops, max_allele)
    return a, b, c
# noinspection PyPep8Naming
def _weir_cockerham_fst(g, subpops, max_allele):
    """Compute Weir & Cockerham (1984) variance components a, b, c per
    variant and per allele.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Diploid genotype array (coerced to GenotypeArray below).
    subpops : sequence of sequences of ints
        Sample indices for each subpopulation.
    max_allele : int
        The highest allele index to consider.

    Returns
    -------
    a, b, c : ndarray, float, shape (n_variants, n_alleles)
        Components of variance between populations, between individuals
        within populations, and between gametes within individuals.
    """
    # check inputs
    g = GenotypeArray(g, copy=False)
    n_variants, n_samples, ploidy = g.shape
    n_alleles = max_allele + 1
    # number of populations sampled
    r = len(subpops)
    n_populations = r
    debug('r: %r', r)
    # count alleles within each subpopulation
    ac = [g.count_alleles(subpop=s, max_allele=max_allele) for s in subpops]
    # stack allele counts from each sub-population into a single array
    ac = np.dstack(ac)
    assert ac.shape == (n_variants, n_alleles, n_populations)
    debug('ac: %s, %r', ac.shape, ac)
    # count number of alleles called within each population by summing
    # allele counts along the alleles dimension
    an = np.sum(ac, axis=1)
    assert an.shape == (n_variants, n_populations)
    debug('an: %s, %r', an.shape, an)
    # compute number of individuals sampled from each population
    # (diploid only, hence integer division by 2)
    n = an // 2
    assert n.shape == (n_variants, n_populations)
    debug('n: %s, %r', n.shape, n)
    # compute the total number of individuals sampled across all populations
    n_total = np.sum(n, axis=1)
    assert n_total.shape == (n_variants,)
    debug('n_total: %s, %r', n_total.shape, n_total)
    # compute the average sample size across populations
    n_bar = np.mean(n, axis=1)
    assert n_bar.shape == (n_variants,)
    debug('n_bar: %s, %r', n_bar.shape, n_bar)
    # compute the term n sub C incorporating the coefficient of variation in
    # sample sizes
    n_C = (n_total - (np.sum(n**2, axis=1) / n_total)) / (r - 1)
    assert n_C.shape == (n_variants,)
    debug('n_C: %s, %r', n_C.shape, n_C)
    # compute allele frequencies within each population
    p = ac / an[:, np.newaxis, :]
    assert p.shape == (n_variants, n_alleles, n_populations)
    debug('p: %s, %r', p.shape, p)
    # compute the average sample frequency of each allele
    ac_total = np.sum(ac, axis=2)
    an_total = np.sum(an, axis=1)
    p_bar = ac_total / an_total[:, np.newaxis]
    assert p_bar.shape == (n_variants, n_alleles)
    debug('p_bar: %s, %r', p_bar.shape, p_bar)
    # add in some extra dimensions to enable broadcasting
    n_bar = n_bar[:, np.newaxis]
    n_C = n_C[:, np.newaxis]
    n = n[:, np.newaxis, :]
    p_bar = p_bar[:, :, np.newaxis]
    # compute the sample variance of allele frequencies over populations
    s_squared = (
        np.sum(n * ((p - p_bar) ** 2),
               axis=2) /
        (n_bar * (r - 1))
    )
    assert s_squared.shape == (n_variants, n_alleles)
    debug('s_squared: %s, %r', s_squared.shape, s_squared)
    # remove extra dimensions for correct broadcasting
    p_bar = p_bar[:, :, 0]
    # compute the average heterozygosity over all populations
    # N.B., take only samples in subpops of interest
    gs = g.take(list(itertools.chain(*subpops)), axis=1)
    h_bar = [gs.count_het(allele=allele, axis=1) / n_total
             for allele in range(n_alleles)]
    h_bar = np.column_stack(h_bar)
    assert h_bar.shape == (n_variants, n_alleles)
    debug('h_bar: %s, %r', h_bar.shape, h_bar)
    # now comes the tricky bit...
    # component of variance between populations
    # (equation 2 in Weir & Cockerham 1984)
    a = ((n_bar / n_C) *
         (s_squared -
          ((1 / (n_bar - 1)) *
           ((p_bar * (1 - p_bar)) -
            ((r - 1) * s_squared / r) -
            (h_bar / 4)))))
    assert a.shape == (n_variants, n_alleles)
    # component of variance between individuals within populations
    # (equation 3 in Weir & Cockerham 1984)
    b = ((n_bar / (n_bar - 1)) *
         ((p_bar * (1 - p_bar)) -
          ((r - 1) * s_squared / r) -
          (((2 * n_bar) - 1) * h_bar / (4 * n_bar))))
    assert b.shape == (n_variants, n_alleles)
    # component of variance between gametes within individuals
    # (equation 4 in Weir & Cockerham 1984)
    c = h_bar / 2
    assert c.shape == (n_variants, n_alleles)
    return a, b, c
def hudson_fst(ac1, ac2, fill=np.nan):
    """Calculate the numerator and denominator for Fst estimation using the
    method of Hudson (1992) elaborated by Bhatia et al. (2013).

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    fill : float
        Use this value where there are no pairs to compare (e.g.,
        all allele calls are missing).

    Returns
    -------
    num : ndarray, float, shape (n_variants,)
        Divergence between the two populations minus average
        of diversity within each population.
    den : ndarray, float, shape (n_variants,)
        Divergence between the two populations.

    Examples
    --------
    Estimate Fst for each variant individually as ``num / den``, or
    averaged over variants as ``np.sum(num) / np.sum(den)``.
    """
    # validate and align the two allele counts arrays
    ac1 = asarray_ndim(ac1, 2)
    ac2 = asarray_ndim(ac2, 2)
    check_dim0_aligned(ac1, ac2)
    ac1, ac2 = ensure_dim1_aligned(ac1, ac2)
    # total number of allele calls per variant in each population
    an1 = np.sum(ac1, axis=1)
    an2 = np.sum(ac2, axis=1)
    # average diversity (a.k.a. heterozygosity) within each population
    div1 = mean_pairwise_difference(ac1, an1, fill=fill)
    div2 = mean_pairwise_difference(ac2, an2, fill=fill)
    within = (div1 + div2) / 2
    # divergence (a.k.a. heterozygosity) between the two populations
    between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill)
    # numerator and denominator for Fst estimation
    return between - within, between
def patterson_fst(aca, acb):
    """Estimator of differentiation between populations A and B based on the
    F2 parameter.

    Parameters
    ----------
    aca : array_like, int, shape (n_variants, 2)
        Allele counts for population A.
    acb : array_like, int, shape (n_variants, 2)
        Allele counts for population B.

    Returns
    -------
    num : ndarray, shape (n_variants,), float
        Numerator.
    den : ndarray, shape (n_variants,), float
        Denominator.

    Notes
    -----
    See Patterson (2012), Appendix A.
    TODO check if this is numerically equivalent to Hudson's estimator.
    """
    # local import to avoid a circular dependency at module load time
    from allel.stats.admixture import patterson_f2, h_hat
    # numerator is the F2 statistic itself
    f2 = patterson_f2(aca, acb)
    # denominator adds the heterozygosity estimates for both populations
    denominator = f2 + h_hat(aca) + h_hat(acb)
    return f2, denominator
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None,
                                stop=None, step=None, windows=None,
                                fill=np.nan, max_allele=None):
    """Estimate average Fst in windows over a single chromosome/contig,
    following the method of Weir and Cockerham (1984).

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    subpops : sequence of sequences of ints
        Sample indices for each subpopulation.
    size : int
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where there are no variants within a window.
    max_allele : int, optional
        The highest allele index to consider.

    Returns
    -------
    fst : ndarray, float, shape (n_windows,)
        Average Fst in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.
    """
    # per-variant variance components
    a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)

    def fst_func(wa, wb, wc):
        # ratio of sums estimator over the variants in one window
        numerator = np.nansum(wa)
        denominator = np.nansum(wa) + np.nansum(wb) + np.nansum(wc)
        return numerator / denominator

    # apply the estimator within each window
    fst, windows, counts = windowed_statistic(
        pos, values=(a, b, c), statistic=fst_func, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=fill)
    return fst, windows, counts
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
                        step=None, windows=None, fill=np.nan):
    """Estimate average Fst in windows over a single chromosome/contig,
    following the method of Hudson (1992) elaborated by Bhatia et al. (2013).

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where there are no variants within a window.

    Returns
    -------
    fst : ndarray, float, shape (n_windows,)
        Average Fst in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.
    """
    # per-variant numerator and denominator
    num, den = hudson_fst(ac1, ac2)

    def fst_func(wn, wd):
        # ratio of sums estimator over the variants in one window
        return np.nansum(wn) / np.nansum(wd)

    # apply the estimator within each window
    fst, windows, counts = windowed_statistic(
        pos, values=(num, den), statistic=fst_func, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=fill)
    return fst, windows, counts
def windowed_patterson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
                           step=None, windows=None, fill=np.nan):
    """Estimate average Fst in windows over a single chromosome/contig,
    following the method of Patterson (2012).

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    fill : object, optional
        The value to use where there are no variants within a window.

    Returns
    -------
    fst : ndarray, float, shape (n_windows,)
        Average Fst in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.
    """
    # per-variant numerator and denominator
    num, den = patterson_fst(ac1, ac2)

    def fst_func(wn, wd):
        # ratio of sums estimator over the variants in one window
        return np.nansum(wn) / np.nansum(wd)

    # apply the estimator within each window
    fst, windows, counts = windowed_statistic(
        pos, values=(num, den), statistic=fst_func, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=fill)
    return fst, windows, counts
def blockwise_weir_cockerham_fst(g, subpops, blen, max_allele=None):
    """Estimate average Fst and standard error using the block-jackknife.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    subpops : sequence of sequences of ints
        Sample indices for each subpopulation.
    blen : int
        Block size (number of variants).
    max_allele : int, optional
        The highest allele index to consider.

    Returns
    -------
    fst : float
        Estimated value of the statistic using all data.
    se : float
        Estimated standard error.
    vb : ndarray, float, shape (n_blocks,)
        Value of the statistic in each block.
    vj : ndarray, float, shape (n_blocks,)
        Values of the statistic from block-jackknife resampling.
    """
    # per-variant variance components
    a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)
    # overall estimate from the full data
    total_a = np.nansum(a)
    total_b = np.nansum(b)
    total_c = np.nansum(c)
    fst = total_a / (total_a + total_b + total_c)
    # numerator and denominator summed within each block
    num_bsum = moving_statistic(a, statistic=np.nansum, size=blen)
    den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen)
    # statistic value within each block
    vb = num_bsum / den_bsum

    def block_ratio(n, d):
        # ratio of sums over the retained blocks
        return np.sum(n) / np.sum(d)

    # block-jackknife to estimate the standard error
    _, se, vj = jackknife((num_bsum, den_bsum), statistic=block_ratio)
    return fst, se, vb, vj
def blockwise_hudson_fst(ac1, ac2, blen):
    """Estimate average Fst between two populations and standard error using
    the block-jackknife.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    blen : int
        Block size (number of variants).

    Returns
    -------
    fst : float
        Estimated value of the statistic using all data.
    se : float
        Estimated standard error.
    vb : ndarray, float, shape (n_blocks,)
        Value of the statistic in each block.
    vj : ndarray, float, shape (n_blocks,)
        Values of the statistic from block-jackknife resampling.
    """
    # per-variant numerator and denominator
    num, den = hudson_fst(ac1, ac2, fill=np.nan)
    # overall estimate from the full data
    fst = np.nansum(num) / np.nansum(den)
    # numerator and denominator summed within each block
    num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
    den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
    # statistic value within each block
    vb = num_bsum / den_bsum

    def block_ratio(n, d):
        # ratio of sums over the retained blocks
        return np.sum(n) / np.sum(d)

    # block-jackknife to estimate the standard error
    _, se, vj = jackknife((num_bsum, den_bsum), statistic=block_ratio)
    return fst, se, vb, vj
def blockwise_patterson_fst(ac1, ac2, blen):
    """Estimate average Fst between two populations and standard error using
    the block-jackknife.

    Parameters
    ----------
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array from the second population.
    blen : int
        Block size (number of variants).

    Returns
    -------
    fst : float
        Estimated value of the statistic using all data.
    se : float
        Estimated standard error.
    vb : ndarray, float, shape (n_blocks,)
        Value of the statistic in each block.
    vj : ndarray, float, shape (n_blocks,)
        Values of the statistic from block-jackknife resampling.
    """
    # per-variant numerator and denominator
    num, den = patterson_fst(ac1, ac2)
    # overall estimate from the full data
    fst = np.nansum(num) / np.nansum(den)
    # numerator and denominator summed within each block
    num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
    den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
    # statistic value within each block
    vb = num_bsum / den_bsum

    def block_ratio(n, d):
        # ratio of sums over the retained blocks
        return np.sum(n) / np.sum(d)

    # block-jackknife to estimate the standard error
    _, se, vj = jackknife((num_bsum, den_bsum), statistic=block_ratio)
    return fst, se, vb, vj
| [
"logging.getLogger",
"itertools.chain",
"numpy.column_stack",
"allel.stats.admixture.h_hat",
"allel.stats.diversity.mean_pairwise_difference",
"allel.stats.diversity.mean_pairwise_difference_between",
"allel.chunked.get_blen_array",
"numpy.mean",
"allel.util.check_dim0_aligned",
"allel.stats.windo... | [((527, 554), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (544, 554), False, 'import logging\n'), ((4899, 4927), 'allel.model.ndarray.GenotypeArray', 'GenotypeArray', (['g'], {'copy': '(False)'}), '(g, copy=False)\n', (4912, 4927), False, 'from allel.model.ndarray import GenotypeArray\n'), ((5310, 5323), 'numpy.dstack', 'np.dstack', (['ac'], {}), '(ac)\n', (5319, 5323), True, 'import numpy as np\n'), ((5553, 5571), 'numpy.sum', 'np.sum', (['ac'], {'axis': '(1)'}), '(ac, axis=1)\n', (5559, 5571), True, 'import numpy as np\n'), ((5920, 5937), 'numpy.sum', 'np.sum', (['n'], {'axis': '(1)'}), '(n, axis=1)\n', (5926, 5937), True, 'import numpy as np\n'), ((6103, 6121), 'numpy.mean', 'np.mean', (['n'], {'axis': '(1)'}), '(n, axis=1)\n', (6110, 6121), True, 'import numpy as np\n'), ((6711, 6729), 'numpy.sum', 'np.sum', (['ac'], {'axis': '(2)'}), '(ac, axis=2)\n', (6717, 6729), True, 'import numpy as np\n'), ((6745, 6763), 'numpy.sum', 'np.sum', (['an'], {'axis': '(1)'}), '(an, axis=1)\n', (6751, 6763), True, 'import numpy as np\n'), ((7766, 7788), 'numpy.column_stack', 'np.column_stack', (['h_bar'], {}), '(h_bar)\n', (7781, 7788), True, 'import numpy as np\n'), ((10654, 10674), 'allel.util.asarray_ndim', 'asarray_ndim', (['ac1', '(2)'], {}), '(ac1, 2)\n', (10666, 10674), False, 'from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned\n'), ((10685, 10705), 'allel.util.asarray_ndim', 'asarray_ndim', (['ac2', '(2)'], {}), '(ac2, 2)\n', (10697, 10705), False, 'from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned\n'), ((10710, 10738), 'allel.util.check_dim0_aligned', 'check_dim0_aligned', (['ac1', 'ac2'], {}), '(ac1, ac2)\n', (10728, 10738), False, 'from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned\n'), ((10754, 10783), 'allel.util.ensure_dim1_aligned', 'ensure_dim1_aligned', (['ac1', 'ac2'], {}), '(ac1, ac2)\n', (10773, 10783), False, 'from 
allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned\n'), ((10827, 10846), 'numpy.sum', 'np.sum', (['ac1'], {'axis': '(1)'}), '(ac1, axis=1)\n', (10833, 10846), True, 'import numpy as np\n'), ((10857, 10876), 'numpy.sum', 'np.sum', (['ac2'], {'axis': '(1)'}), '(ac2, axis=1)\n', (10863, 10876), True, 'import numpy as np\n'), ((11182, 11245), 'allel.stats.diversity.mean_pairwise_difference_between', 'mean_pairwise_difference_between', (['ac1', 'ac2', 'an1', 'an2'], {'fill': 'fill'}), '(ac1, ac2, an1, an2, fill=fill)\n', (11214, 11245), False, 'from allel.stats.diversity import mean_pairwise_difference, mean_pairwise_difference_between\n'), ((12077, 12099), 'allel.stats.admixture.patterson_f2', 'patterson_f2', (['aca', 'acb'], {}), '(aca, acb)\n', (12089, 12099), False, 'from allel.stats.admixture import patterson_f2, h_hat\n'), ((14326, 14468), 'allel.stats.window.windowed_statistic', 'windowed_statistic', (['pos'], {'values': '(a, b, c)', 'statistic': 'average_fst', 'size': 'size', 'start': 'start', 'stop': 'stop', 'step': 'step', 'windows': 'windows', 'fill': 'fill'}), '(pos, values=(a, b, c), statistic=average_fst, size=size,\n start=start, stop=stop, step=step, windows=windows, fill=fill)\n', (14344, 14468), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((16714, 16857), 'allel.stats.window.windowed_statistic', 'windowed_statistic', (['pos'], {'values': '(num, den)', 'statistic': 'average_fst', 'size': 'size', 'start': 'start', 'stop': 'stop', 'step': 'step', 'windows': 'windows', 'fill': 'fill'}), '(pos, values=(num, den), statistic=average_fst, size=size,\n start=start, stop=stop, step=step, windows=windows, fill=fill)\n', (16732, 16857), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((19082, 19225), 'allel.stats.window.windowed_statistic', 'windowed_statistic', (['pos'], {'values': '(num, den)', 'statistic': 'average_fst', 'size': 'size', 'start': 'start', 'stop': 'stop', 
'step': 'step', 'windows': 'windows', 'fill': 'fill'}), '(pos, values=(num, den), statistic=average_fst, size=size,\n start=start, stop=stop, step=step, windows=windows, fill=fill)\n', (19100, 19225), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((20429, 20441), 'numpy.nansum', 'np.nansum', (['a'], {}), '(a)\n', (20438, 20441), True, 'import numpy as np\n'), ((20454, 20466), 'numpy.nansum', 'np.nansum', (['b'], {}), '(b)\n', (20463, 20466), True, 'import numpy as np\n'), ((20479, 20491), 'numpy.nansum', 'np.nansum', (['c'], {}), '(c)\n', (20488, 20491), True, 'import numpy as np\n'), ((20612, 20663), 'allel.stats.window.moving_statistic', 'moving_statistic', (['a'], {'statistic': 'np.nansum', 'size': 'blen'}), '(a, statistic=np.nansum, size=blen)\n', (20628, 20663), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((20679, 20738), 'allel.stats.window.moving_statistic', 'moving_statistic', (['(a + b + c)'], {'statistic': 'np.nansum', 'size': 'blen'}), '(a + b + c, statistic=np.nansum, size=blen)\n', (20695, 20738), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((22049, 22102), 'allel.stats.window.moving_statistic', 'moving_statistic', (['num'], {'statistic': 'np.nansum', 'size': 'blen'}), '(num, statistic=np.nansum, size=blen)\n', (22065, 22102), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((22118, 22171), 'allel.stats.window.moving_statistic', 'moving_statistic', (['den'], {'statistic': 'np.nansum', 'size': 'blen'}), '(den, statistic=np.nansum, size=blen)\n', (22134, 22171), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((23475, 23528), 'allel.stats.window.moving_statistic', 'moving_statistic', (['num'], {'statistic': 'np.nansum', 'size': 'blen'}), '(num, statistic=np.nansum, size=blen)\n', (23491, 23528), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), 
((23544, 23597), 'allel.stats.window.moving_statistic', 'moving_statistic', (['den'], {'statistic': 'np.nansum', 'size': 'blen'}), '(den, statistic=np.nansum, size=blen)\n', (23560, 23597), False, 'from allel.stats.window import windowed_statistic, moving_statistic\n'), ((3855, 3883), 'allel.model.ndarray.GenotypeArray', 'GenotypeArray', (['g'], {'copy': '(False)'}), '(g, copy=False)\n', (3868, 3883), False, 'from allel.model.ndarray import GenotypeArray\n'), ((4227, 4250), 'allel.chunked.get_blen_array', 'get_blen_array', (['g', 'blen'], {}), '(g, blen)\n', (4241, 4250), False, 'from allel.chunked import get_blen_array\n'), ((4340, 4367), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""f8"""'}), "(shape, dtype='f8')\n", (4348, 4367), True, 'import numpy as np\n'), ((4380, 4407), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""f8"""'}), "(shape, dtype='f8')\n", (4388, 4407), True, 'import numpy as np\n'), ((4420, 4447), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': '"""f8"""'}), "(shape, dtype='f8')\n", (4428, 4447), True, 'import numpy as np\n'), ((7193, 7229), 'numpy.sum', 'np.sum', (['(n * (p - p_bar) ** 2)'], {'axis': '(2)'}), '(n * (p - p_bar) ** 2, axis=2)\n', (7199, 7229), True, 'import numpy as np\n'), ((12129, 12139), 'allel.stats.admixture.h_hat', 'h_hat', (['acb'], {}), '(acb)\n', (12134, 12139), False, 'from allel.stats.admixture import patterson_f2, h_hat\n'), ((21939, 21953), 'numpy.nansum', 'np.nansum', (['num'], {}), '(num)\n', (21948, 21953), True, 'import numpy as np\n'), ((21956, 21970), 'numpy.nansum', 'np.nansum', (['den'], {}), '(den)\n', (21965, 21970), True, 'import numpy as np\n'), ((23365, 23379), 'numpy.nansum', 'np.nansum', (['num'], {}), '(num)\n', (23374, 23379), True, 'import numpy as np\n'), ((23382, 23396), 'numpy.nansum', 'np.nansum', (['den'], {}), '(den)\n', (23391, 23396), True, 'import numpy as np\n'), ((7614, 7639), 'itertools.chain', 'itertools.chain', (['*subpops'], {}), '(*subpops)\n', (7629, 7639), False, 
'import itertools\n'), ((10979, 11024), 'allel.stats.diversity.mean_pairwise_difference', 'mean_pairwise_difference', (['ac1', 'an1'], {'fill': 'fill'}), '(ac1, an1, fill=fill)\n', (11003, 11024), False, 'from allel.stats.diversity import mean_pairwise_difference, mean_pairwise_difference_between\n'), ((11041, 11086), 'allel.stats.diversity.mean_pairwise_difference', 'mean_pairwise_difference', (['ac2', 'an2'], {'fill': 'fill'}), '(ac2, an2, fill=fill)\n', (11065, 11086), False, 'from allel.stats.diversity import mean_pairwise_difference, mean_pairwise_difference_between\n'), ((12116, 12126), 'allel.stats.admixture.h_hat', 'h_hat', (['aca'], {}), '(aca)\n', (12121, 12126), False, 'from allel.stats.admixture import patterson_f2, h_hat\n'), ((14195, 14208), 'numpy.nansum', 'np.nansum', (['wa'], {}), '(wa)\n', (14204, 14208), True, 'import numpy as np\n'), ((16617, 16630), 'numpy.nansum', 'np.nansum', (['wn'], {}), '(wn)\n', (16626, 16630), True, 'import numpy as np\n'), ((16633, 16646), 'numpy.nansum', 'np.nansum', (['wd'], {}), '(wd)\n', (16642, 16646), True, 'import numpy as np\n'), ((18985, 18998), 'numpy.nansum', 'np.nansum', (['wn'], {}), '(wn)\n', (18994, 18998), True, 'import numpy as np\n'), ((19001, 19014), 'numpy.nansum', 'np.nansum', (['wd'], {}), '(wd)\n', (19010, 19014), True, 'import numpy as np\n'), ((6328, 6350), 'numpy.sum', 'np.sum', (['(n ** 2)'], {'axis': '(1)'}), '(n ** 2, axis=1)\n', (6334, 6350), True, 'import numpy as np\n'), ((14244, 14257), 'numpy.nansum', 'np.nansum', (['wc'], {}), '(wc)\n', (14253, 14257), True, 'import numpy as np\n'), ((14212, 14225), 'numpy.nansum', 'np.nansum', (['wa'], {}), '(wa)\n', (14221, 14225), True, 'import numpy as np\n'), ((14228, 14241), 'numpy.nansum', 'np.nansum', (['wb'], {}), '(wb)\n', (14237, 14241), True, 'import numpy as np\n'), ((20948, 20957), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (20954, 20957), True, 'import numpy as np\n'), ((20960, 20969), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', 
(20966, 20969), True, 'import numpy as np\n'), ((22381, 22390), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (22387, 22390), True, 'import numpy as np\n'), ((22393, 22402), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (22399, 22402), True, 'import numpy as np\n'), ((23807, 23816), 'numpy.sum', 'np.sum', (['n'], {}), '(n)\n', (23813, 23816), True, 'import numpy as np\n'), ((23819, 23828), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (23825, 23828), True, 'import numpy as np\n')] |
from numpy import ones
from eliot import log_call, to_file
# Route eliot's structured action log to a local file for later inspection.
to_file(open('eliot.log', 'w'))
@log_call
def palavras(split=False):
    """Load words from the local ``br-utf8.txt`` file.

    With ``split=True`` the whole file is returned split on newlines;
    otherwise the first 50 raw lines (trailing newlines kept) are returned.
    The ``@log_call`` decorator records the call in eliot.log.
    """
    with open('br-utf8.txt') as handle:
        if not split:
            return handle.readlines()[:50]
        return handle.read().split('\n')
@log_call
def numpy_array():
    """Allocate and return a 100x100x100 array of ones (logged via eliot)."""
    return ones((100, 100, 100))
# p1 = palavras(True)
# Demo call: reads the first 50 lines; the logged action ends up in eliot.log.
p2 = palavras(False)
# na = numpy_array()
| [
"numpy.ones"
] | [((340, 361), 'numpy.ones', 'ones', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (344, 361), False, 'from numpy import ones\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, constant_init, kaiming_init
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmpose.core import (WeightNormClipHook, compute_similarity_transform,
fliplr_regression)
from mmpose.models.builder import HEADS, build_loss
@HEADS.register_module()
class TemporalRegressionHead(nn.Module):
    """Regression head of VideoPose3D.

    Paper ref: <NAME>.
    ``3D human pose estimation in video with temporal convolutions and
    semi-supervised training``

    Args:
        in_channels (int): Number of input channels
        num_joints (int): Number of joints
        loss_keypoint (dict): Config for keypoint loss. Default: None.
        max_norm (float|None): if not None, the weight of convolution layers
            will be clipped to have a maximum norm of max_norm.
        is_trajectory (bool): If the model only predicts root joint
            position, then this arg should be set to True. In this case,
            traj_loss will be calculated. Otherwise, it should be set to
            False. Default: False.
    """

    def __init__(self,
                 in_channels,
                 num_joints,
                 max_norm=None,
                 loss_keypoint=None,
                 is_trajectory=False,
                 train_cfg=None,
                 test_cfg=None):
        super().__init__()

        self.in_channels = in_channels
        self.num_joints = num_joints
        self.max_norm = max_norm
        self.loss = build_loss(loss_keypoint)
        self.is_trajectory = is_trajectory
        if self.is_trajectory:
            # A trajectory head predicts only the root joint position.
            assert self.num_joints == 1

        self.train_cfg = {} if train_cfg is None else train_cfg
        self.test_cfg = {} if test_cfg is None else test_cfg

        # 1x1 temporal convolution mapping features to per-joint 3D coords.
        self.conv = build_conv_layer(
            dict(type='Conv1d'), in_channels, num_joints * 3, 1)

        if self.max_norm is not None:
            # Apply weight norm clip to conv layers
            weight_clip = WeightNormClipHook(self.max_norm)
            for module in self.modules():
                if isinstance(module, nn.modules.conv._ConvNd):
                    weight_clip.register(module)

    @staticmethod
    def _transform_inputs(x):
        """Transform inputs for decoder.

        Args:
            x (tuple/list of Tensor | Tensor): multi-level features.

        Returns:
            Tensor: The transformed inputs
        """
        if not isinstance(x, (list, tuple)):
            return x

        assert len(x) > 0

        # return the top-level feature of the 1D feature pyramid
        return x[-1]

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor[N, C, 1]): Input features (single remaining time step).

        Returns:
            Tensor[N, K, 3]: Predicted 3D joint coordinates.
        """
        x = self._transform_inputs(x)

        assert x.ndim == 3 and x.shape[2] == 1, f'Invalid shape {x.shape}'

        output = self.conv(x)
        N = output.shape[0]
        # Fix: the original also computed an unused
        # ``ret.detach().cpu().numpy()`` here, forcing a device-to-host
        # sync on every forward pass; the dead statement is removed.
        return output.reshape(N, self.num_joints, 3)

    def get_loss(self, output, target, target_weight):
        """Calculate keypoint loss.

        Note:
            batch_size: N
            num_keypoints: K

        Args:
            output (torch.Tensor[N, K, 3]): Output keypoints.
            target (torch.Tensor[N, K, 3]): Target keypoints.
            target_weight (torch.Tensor[N, K, 3]):
                Weights across different joint types.
                If self.is_trajectory is True and target_weight is None,
                target_weight will be set inversely proportional to joint
                depth.
        """
        losses = dict()

        assert not isinstance(self.loss, nn.Sequential)

        # trajectory model
        if self.is_trajectory:
            if target.dim() == 2:
                target.unsqueeze_(1)

            if target_weight is None:
                # Weight samples inversely to joint depth (z coordinate).
                target_weight = (1 / target[:, :, 2:]).expand(target.shape)
            assert target.dim() == 3 and target_weight.dim() == 3

            losses['traj_loss'] = self.loss(output, target, target_weight)

        # pose model
        else:
            if target_weight is None:
                target_weight = target.new_ones(target.shape)
            assert target.dim() == 3 and target_weight.dim() == 3
            losses['reg_loss'] = self.loss(output, target, target_weight)

        return losses

    def get_accuracy(self, output, target, target_weight, metas):
        """Calculate accuracy for keypoint loss.

        Note:
            batch_size: N
            num_keypoints: K

        Args:
            output (torch.Tensor[N, K, 3]): Output keypoints.
            target (torch.Tensor[N, K, 3]): Target keypoints.
            target_weight (torch.Tensor[N, K, 3]):
                Weights across different joint types.
            metas (list(dict)): Information about data augmentation including:

                - target_image_path (str): Optional, path to the image file
                - target_mean (float): Optional, normalization parameter of
                    the target pose.
                - target_std (float): Optional, normalization parameter of the
                    target pose.
                - root_position (np.ndarray[3,1]): Optional, global
                    position of the root joint.
                - root_index (torch.ndarray[1,]): Optional, original index of
                    the root joint before root-centering.
        """
        accuracy = dict()

        N = output.shape[0]
        output_ = output.detach().cpu().numpy()
        target_ = target.detach().cpu().numpy()

        # Denormalize the predicted pose
        if 'target_mean' in metas[0] and 'target_std' in metas[0]:
            target_mean = np.stack([m['target_mean'] for m in metas])
            target_std = np.stack([m['target_std'] for m in metas])
            output_ = self._denormalize_joints(output_, target_mean,
                                               target_std)
            target_ = self._denormalize_joints(target_, target_mean,
                                               target_std)

        # Restore global position
        if self.test_cfg.get('restore_global_position', False):
            root_pos = np.stack([m['root_position'] for m in metas])
            root_idx = metas[0].get('root_position_index', None)
            output_ = self._restore_global_position(output_, root_pos,
                                                    root_idx)
            target_ = self._restore_global_position(target_, root_pos,
                                                    root_idx)
        # Get target weight
        if target_weight is None:
            target_weight_ = np.ones_like(target_)
        else:
            target_weight_ = target_weight.detach().cpu().numpy()
            if self.test_cfg.get('restore_global_position', False):
                root_idx = metas[0].get('root_position_index', None)
                root_weight = metas[0].get('root_joint_weight', 1.0)
                target_weight_ = self._restore_root_target_weight(
                    target_weight_, root_weight, root_idx)

        # MPJPE: mean per-joint position error in the original space.
        mpjpe = np.mean(
            np.linalg.norm((output_ - target_) * target_weight_, axis=-1))

        # P-MPJPE: error after rigid (Procrustes) alignment per sample.
        transformed_output = np.zeros_like(output_)
        for i in range(N):
            transformed_output[i, :, :] = compute_similarity_transform(
                output_[i, :, :], target_[i, :, :])
        p_mpjpe = np.mean(
            np.linalg.norm(
                (transformed_output - target_) * target_weight_, axis=-1))

        accuracy['mpjpe'] = output.new_tensor(mpjpe)
        accuracy['p_mpjpe'] = output.new_tensor(p_mpjpe)

        return accuracy

    def inference_model(self, x, flip_pairs=None):
        """Inference function.

        Returns:
            output_regression (np.ndarray): Output regression.

        Args:
            x (torch.Tensor[N, K, 2]): Input features.
            flip_pairs (None | list[tuple()):
                Pairs of keypoints which are mirrored.
        """
        output = self.forward(x)

        if flip_pairs is not None:
            output_regression = fliplr_regression(
                output.detach().cpu().numpy(),
                flip_pairs,
                center_mode='static',
                center_x=0)
        else:
            output_regression = output.detach().cpu().numpy()
        return output_regression

    def decode(self, metas, output):
        """Decode the keypoints from output regression.

        Args:
            metas (list(dict)): Information about data augmentation including:

                - target_image_path (str): Optional, path to the image file
                - target_mean (float): Optional, normalization parameter of
                    the target pose.
                - target_std (float): Optional, normalization parameter of the
                    target pose.
                - root_position (np.ndarray[3,1]): Optional, global
                    position of the root joint.
                - root_index (torch.ndarray[1,]): Optional, original index of
                    the root joint before root-centering.
            output (np.ndarray[N, K, 3]): predicted regression vector.
        """
        # Denormalize the predicted pose
        if 'target_mean' in metas[0] and 'target_std' in metas[0]:
            target_mean = np.stack([m['target_mean'] for m in metas])
            target_std = np.stack([m['target_std'] for m in metas])
            output = self._denormalize_joints(output, target_mean, target_std)

        # Restore global position
        if self.test_cfg.get('restore_global_position', False):
            root_pos = np.stack([m['root_position'] for m in metas])
            root_idx = metas[0].get('root_position_index', None)
            output = self._restore_global_position(output, root_pos, root_idx)

        target_image_paths = [m.get('target_image_path', None) for m in metas]
        result = {'preds': output, 'target_image_paths': target_image_paths}

        return result

    @staticmethod
    def _denormalize_joints(x, mean, std):
        """Denormalize joint coordinates with given statistics mean and std.

        Args:
            x (np.ndarray[N, K, 3]): Normalized joint coordinates.
            mean (np.ndarray[K, 3]): Mean value.
            std (np.ndarray[K, 3]): Std value.
        """
        assert x.ndim == 3
        assert x.shape == mean.shape == std.shape

        return x * std + mean

    @staticmethod
    def _restore_global_position(x, root_pos, root_idx=None):
        """Restore global position of the root-centered joints.

        Args:
            x (np.ndarray[N, K, 3]): root-centered joint coordinates
            root_pos (np.ndarray[N,1,3]): The global position of the
                root joint.
            root_idx (int|None): If not none, the root joint will be inserted
                back to the pose at the given index.
        """
        x = x + root_pos
        if root_idx is not None:
            x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)
        return x

    @staticmethod
    def _restore_root_target_weight(target_weight, root_weight, root_idx=None):
        """Restore the target weight of the root joint after the restoration of
        the global position.

        Args:
            target_weight (np.ndarray[N, K, 1]): Target weight of relativized
                joints.
            root_weight (float): The target weight value of the root joint.
            root_idx (int|None): If not none, the root joint weight will be
                inserted back to the target weight at the given index.
        """
        if root_idx is not None:
            root_weight = np.full(
                target_weight.shape[0], root_weight, dtype=target_weight.dtype)
            target_weight = np.insert(
                target_weight, root_idx, root_weight[:, None], axis=1)
        return target_weight

    def init_weights(self):
        """Initialize the weights."""
        for m in self.modules():
            if isinstance(m, nn.modules.conv._ConvNd):
                kaiming_init(m, mode='fan_in', nonlinearity='relu')
            elif isinstance(m, _BatchNorm):
                constant_init(m, 1)
| [
"numpy.insert",
"numpy.ones_like",
"mmcv.cnn.kaiming_init",
"mmpose.models.builder.build_loss",
"numpy.full",
"mmpose.models.builder.HEADS.register_module",
"mmpose.core.WeightNormClipHook",
"numpy.stack",
"numpy.linalg.norm",
"mmcv.cnn.constant_init",
"mmpose.core.compute_similarity_transform",... | [((381, 404), 'mmpose.models.builder.HEADS.register_module', 'HEADS.register_module', ([], {}), '()\n', (402, 404), False, 'from mmpose.models.builder import HEADS, build_loss\n'), ((1605, 1630), 'mmpose.models.builder.build_loss', 'build_loss', (['loss_keypoint'], {}), '(loss_keypoint)\n', (1615, 1630), False, 'from mmpose.models.builder import HEADS, build_loss\n'), ((7296, 7318), 'numpy.zeros_like', 'np.zeros_like', (['output_'], {}), '(output_)\n', (7309, 7318), True, 'import numpy as np\n'), ((2092, 2125), 'mmpose.core.WeightNormClipHook', 'WeightNormClipHook', (['self.max_norm'], {}), '(self.max_norm)\n', (2110, 2125), False, 'from mmpose.core import WeightNormClipHook, compute_similarity_transform, fliplr_regression\n'), ((5773, 5816), 'numpy.stack', 'np.stack', (["[m['target_mean'] for m in metas]"], {}), "([m['target_mean'] for m in metas])\n", (5781, 5816), True, 'import numpy as np\n'), ((5842, 5884), 'numpy.stack', 'np.stack', (["[m['target_std'] for m in metas]"], {}), "([m['target_std'] for m in metas])\n", (5850, 5884), True, 'import numpy as np\n'), ((6263, 6308), 'numpy.stack', 'np.stack', (["[m['root_position'] for m in metas]"], {}), "([m['root_position'] for m in metas])\n", (6271, 6308), True, 'import numpy as np\n'), ((6731, 6752), 'numpy.ones_like', 'np.ones_like', (['target_'], {}), '(target_)\n', (6743, 6752), True, 'import numpy as np\n'), ((7203, 7264), 'numpy.linalg.norm', 'np.linalg.norm', (['((output_ - target_) * target_weight_)'], {'axis': '(-1)'}), '((output_ - target_) * target_weight_, axis=-1)\n', (7217, 7264), True, 'import numpy as np\n'), ((7388, 7452), 'mmpose.core.compute_similarity_transform', 'compute_similarity_transform', (['output_[i, :, :]', 'target_[i, :, :]'], {}), '(output_[i, :, :], target_[i, :, :])\n', (7416, 7452), False, 'from mmpose.core import WeightNormClipHook, compute_similarity_transform, fliplr_regression\n'), ((7509, 7581), 'numpy.linalg.norm', 
'np.linalg.norm', (['((transformed_output - target_) * target_weight_)'], {'axis': '(-1)'}), '((transformed_output - target_) * target_weight_, axis=-1)\n', (7523, 7581), True, 'import numpy as np\n'), ((9585, 9628), 'numpy.stack', 'np.stack', (["[m['target_mean'] for m in metas]"], {}), "([m['target_mean'] for m in metas])\n", (9593, 9628), True, 'import numpy as np\n'), ((9654, 9696), 'numpy.stack', 'np.stack', (["[m['target_std'] for m in metas]"], {}), "([m['target_std'] for m in metas])\n", (9662, 9696), True, 'import numpy as np\n'), ((9898, 9943), 'numpy.stack', 'np.stack', (["[m['root_position'] for m in metas]"], {}), "([m['root_position'] for m in metas])\n", (9906, 9943), True, 'import numpy as np\n'), ((11936, 12007), 'numpy.full', 'np.full', (['target_weight.shape[0]', 'root_weight'], {'dtype': 'target_weight.dtype'}), '(target_weight.shape[0], root_weight, dtype=target_weight.dtype)\n', (11943, 12007), True, 'import numpy as np\n'), ((12053, 12117), 'numpy.insert', 'np.insert', (['target_weight', 'root_idx', 'root_weight[:, None]'], {'axis': '(1)'}), '(target_weight, root_idx, root_weight[:, None], axis=1)\n', (12062, 12117), True, 'import numpy as np\n'), ((12335, 12386), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['m'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m, mode='fan_in', nonlinearity='relu')\n", (12347, 12386), False, 'from mmcv.cnn import build_conv_layer, constant_init, kaiming_init\n'), ((12447, 12466), 'mmcv.cnn.constant_init', 'constant_init', (['m', '(1)'], {}), '(m, 1)\n', (12460, 12466), False, 'from mmcv.cnn import build_conv_layer, constant_init, kaiming_init\n')] |
# ******************************************************************************
# Copyright (c) 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ******************************************************************************
import pytest
import numpy as np
from skipp.filters import (gaussian, laplace, median)
from skipp.filters import (sobel, sobel_v, sobel_h)
from skipp.filters import (prewitt, prewitt_v, prewitt_h)
from skipp.morphology import (erosion, dilation)
from skipp.transform import (resize, rotate, warp)
from skipp.transform import AffineTransform
ROUNDS = 30
ITERATIONS = 1
IMAGE_DTYPE = np.float32
IMAGE_NUM_CHANNELS = 1
SRC_IMAGE_WIDTH = 4000
SRC_IMAGE_HEIGHT = 4000
DST_IMAGE_WIDTH = 20000
DST_IMAGE_HEIGHT = 20000


def get_image_data(image_dtype=IMAGE_DTYPE,
                   shape=(SRC_IMAGE_WIDTH, SRC_IMAGE_HEIGHT),
                   number_of_channels=IMAGE_NUM_CHANNELS):
    """Generate a random test image for the benchmarks.

    Args:
        image_dtype: numpy dtype of the image; float32 or one of the
            supported integer dtypes (uint8, uint16, int16).
        shape: (height, width) of the generated image.
        number_of_channels: 1 for grayscale, 3 or 4 for multichannel
            (the channel axis is appended to ``shape``).

    Returns:
        np.ndarray of the requested shape and dtype filled with random data.

    Raises:
        ValueError: for an unsupported dtype or channel count.
    """
    if number_of_channels in (3, 4):
        # Bug fix: previously the multichannel branch ignored the ``shape``
        # argument and always used the SRC_IMAGE_* constants.
        shape = (shape[0], shape[1], number_of_channels)
    elif number_of_channels != 1:
        raise ValueError("No test suits for provided "
                         "number_of_channels: {}".format(number_of_channels))
    if image_dtype == np.float32:
        image = np.random.random(shape).astype(image_dtype)
    elif image_dtype in (np.uint8, np.uint16, np.int16):
        # Bug fix: the original generated ``np.random.random(...).astype(int
        # dtype)`` (all zeros, since values in [0, 1) truncate to 0) and
        # discarded the result of ``np.random.randint``.  Draw the integer
        # image directly instead; the 0..254 range matches the original
        # intent of ``randint(255, ...)``.
        image = np.random.randint(255, size=shape, dtype=image_dtype)
    else:
        raise ValueError("No test suits for provided dtype: {}".format(image_dtype))
    return image
@pytest.mark.parametrize("sigma", [pytest.param(1, id="1.0"),
pytest.param(10, id="10.0")])
def test_gaussian(benchmark, sigma):
image = get_image_data()
result = benchmark.pedantic(target=gaussian, args=(image, sigma),
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("function",[pytest.param(sobel, id="sobel"),
pytest.param(sobel_v, id="sobel_v"),
pytest.param(sobel_h, id="sobel_h"),
pytest.param(prewitt, id="prewitt"),
pytest.param(prewitt_v, id="prewitt_v"),
pytest.param(prewitt_h, id="prewitt_h"),
pytest.param(laplace, id="laplace"),])
def test_edges(benchmark, function):
image = get_image_data(image_dtype=IMAGE_DTYPE,
shape=(13000, 13000),
number_of_channels=IMAGE_NUM_CHANNELS)
result = benchmark.pedantic(target=function, args=(image, None),
rounds=ROUNDS, iterations=ITERATIONS)
def test_median(benchmark):
    """Benchmark scikit-ipp median filtering (ipp behavior)."""
    src = get_image_data()
    result = benchmark.pedantic(
        target=median,
        args=(src, None),
        kwargs=dict(behavior="ipp"),
        rounds=ROUNDS,
        iterations=ITERATIONS)
@pytest.mark.parametrize("function",[erosion, dilation],
ids=["erosion", "dilation"])
def test_morphology(benchmark, function):
image = get_image_data()
result = benchmark.pedantic(target=function, args=(image, None),
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("anti_aliasing", [False])
@pytest.mark.parametrize("order", [0, 1, 3])
def test_resize(benchmark, anti_aliasing, order):
output_shape = (DST_IMAGE_WIDTH, DST_IMAGE_HEIGHT)
image = get_image_data()
result = benchmark.pedantic(target=resize, args=(image, output_shape),
kwargs={'mode':'edge','preserve_range': True,
'clip': False,
'anti_aliasing': anti_aliasing,
'order':order},
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("angle", [45])
@pytest.mark.parametrize("order", [0, 1, 3]) # supported by scikit-ipp
def test_rotate(benchmark, angle, order):
image = get_image_data(image_dtype=np.float32, shape=(13000, 13000))
result = benchmark.pedantic(target=rotate, args=(image, angle),
kwargs={'preserve_range': True, 'order':order,
'resize':True},
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("order", [0, 1, 3]) # supported by scikit-ipp
def test_warp(benchmark, order):
image = get_image_data(image_dtype=np.float32, shape=(13000, 13000))
mat = np.array([[ 0.70710678, -0.70710678, 0. ],
[ 0.70710678, 0.70710678, 0. ],
[ 0. , 0. , 1. ]],
dtype=np.double)
transf = AffineTransform(matrix=mat)
result = benchmark.pedantic(target=warp, args=(image, transf.params),
kwargs={'preserve_range': True, 'order':order},
rounds=ROUNDS, iterations=ITERATIONS)
| [
"numpy.random.random",
"pytest.param",
"pytest.mark.parametrize",
"numpy.random.randint",
"numpy.array",
"skipp.transform.AffineTransform"
] | [((4642, 4731), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""function"""', '[erosion, dilation]'], {'ids': "['erosion', 'dilation']"}), "('function', [erosion, dilation], ids=['erosion',\n 'dilation'])\n", (4665, 4731), False, 'import pytest\n'), ((4984, 5033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""anti_aliasing"""', '[False]'], {}), "('anti_aliasing', [False])\n", (5007, 5033), False, 'import pytest\n'), ((5036, 5079), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[0, 1, 3]'], {}), "('order', [0, 1, 3])\n", (5059, 5079), False, 'import pytest\n'), ((5644, 5682), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""angle"""', '[45]'], {}), "('angle', [45])\n", (5667, 5682), False, 'import pytest\n'), ((5685, 5728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[0, 1, 3]'], {}), "('order', [0, 1, 3])\n", (5708, 5728), False, 'import pytest\n'), ((6156, 6199), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', '[0, 1, 3]'], {}), "('order', [0, 1, 3])\n", (6179, 6199), False, 'import pytest\n'), ((6346, 6458), 'numpy.array', 'np.array', (['[[0.70710678, -0.70710678, 0.0], [0.70710678, 0.70710678, 0.0], [0.0, 0.0, 1.0]\n ]'], {'dtype': 'np.double'}), '([[0.70710678, -0.70710678, 0.0], [0.70710678, 0.70710678, 0.0], [\n 0.0, 0.0, 1.0]], dtype=np.double)\n', (6354, 6458), True, 'import numpy as np\n'), ((6577, 6604), 'skipp.transform.AffineTransform', 'AffineTransform', ([], {'matrix': 'mat'}), '(matrix=mat)\n', (6592, 6604), False, 'from skipp.transform import AffineTransform\n'), ((3187, 3212), 'pytest.param', 'pytest.param', (['(1)'], {'id': '"""1.0"""'}), "(1, id='1.0')\n", (3199, 3212), False, 'import pytest\n'), ((3250, 3277), 'pytest.param', 'pytest.param', (['(10)'], {'id': '"""10.0"""'}), "(10, id='10.0')\n", (3262, 3277), False, 'import pytest\n'), ((3532, 3563), 'pytest.param', 'pytest.param', (['sobel'], {'id': '"""sobel"""'}), "(sobel, 
id='sobel')\n", (3544, 3563), False, 'import pytest\n'), ((3603, 3638), 'pytest.param', 'pytest.param', (['sobel_v'], {'id': '"""sobel_v"""'}), "(sobel_v, id='sobel_v')\n", (3615, 3638), False, 'import pytest\n'), ((3678, 3713), 'pytest.param', 'pytest.param', (['sobel_h'], {'id': '"""sobel_h"""'}), "(sobel_h, id='sobel_h')\n", (3690, 3713), False, 'import pytest\n'), ((3753, 3788), 'pytest.param', 'pytest.param', (['prewitt'], {'id': '"""prewitt"""'}), "(prewitt, id='prewitt')\n", (3765, 3788), False, 'import pytest\n'), ((3828, 3867), 'pytest.param', 'pytest.param', (['prewitt_v'], {'id': '"""prewitt_v"""'}), "(prewitt_v, id='prewitt_v')\n", (3840, 3867), False, 'import pytest\n'), ((3907, 3946), 'pytest.param', 'pytest.param', (['prewitt_h'], {'id': '"""prewitt_h"""'}), "(prewitt_h, id='prewitt_h')\n", (3919, 3946), False, 'import pytest\n'), ((3986, 4021), 'pytest.param', 'pytest.param', (['laplace'], {'id': '"""laplace"""'}), "(laplace, id='laplace')\n", (3998, 4021), False, 'import pytest\n'), ((2979, 3031), 'numpy.random.randint', 'np.random.randint', (['(255)'], {'size': 'None', 'dtype': 'image_dtype'}), '(255, size=None, dtype=image_dtype)\n', (2996, 3031), True, 'import numpy as np\n'), ((2781, 2804), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (2797, 2804), True, 'import numpy as np\n'), ((2926, 2949), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (2942, 2949), True, 'import numpy as np\n')] |
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for horovod.tensorflow.keras in TensorFlow 2."""
import math
import tensorflow as tf
import numpy as np
import warnings
from distutils.version import LooseVersion
import pytest
from tensorflow import keras
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
import horovod.tensorflow.keras as hvd
# Version gates: TF/Keras APIs changed across releases, so the tests
# presumably branch or skip on these flags — usage is outside this chunk.
_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion("2.4.0")
_PRE_TF_2_2_0 = LooseVersion(tf.__version__) < LooseVersion("2.2.0")
@pytest.mark.skipif(LooseVersion(tf.__version__) < LooseVersion('2.0.0'), reason='TensorFlow v2 tests')
class Tf2KerasTests(tf.test.TestCase):
    """
    Tests for ops in horovod.tensorflow.keras.
    """
    def __init__(self, *args, **kwargs):
        super(Tf2KerasTests, self).__init__(*args, **kwargs)
        warnings.simplefilter('module')
        # One-time per-process Horovod setup. Each rank is pinned to a
        # single GPU (when GPUs are present) and memory growth is enabled
        # so the process does not grab all device memory up front.
        hvd.init()
        gpus = tf.config.experimental.list_physical_devices('GPU')
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        if gpus:
            tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
    def test_train_model_lr_schedule(self):
        # Smoke test: fit a tiny model whose learning rate comes from a
        # schedule, wrapped by the Horovod distributed optimizer.
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            0.001 * hvd.size(),
            decay_steps=100000,
            decay_rate=0.96,
            staircase=True)
        opt = tf.keras.optimizers.Adam(lr_schedule)
        opt = hvd.DistributedOptimizer(opt)
        model = keras.models.Sequential()
        model.add(keras.layers.Dense(2, input_shape=(3,)))
        model.add(keras.layers.RepeatVector(3))
        model.add(keras.layers.ThresholdedReLU(0.5))
        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=opt,
                      metrics=[keras.metrics.categorical_accuracy],
                      experimental_run_tf_function=False)
        x = np.random.random((1, 3))
        y = np.random.random((1, 3, 2))
        # No assertions, we just need to verify that it doesn't hang or error
        callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
        model.fit(x,
                  y,
                  steps_per_epoch=10,
                  callbacks=callbacks,
                  epochs=1)
    def test_sparse_as_dense(self):
        # Smoke test: `sparse_as_dense=True` converts the sparse embedding
        # gradients to dense tensors before the allreduce.
        opt = keras.optimizers.RMSprop(lr=0.0001)
        opt = hvd.DistributedOptimizer(opt, sparse_as_dense=True)
        model = keras.models.Sequential()
        model.add(keras.layers.Embedding(1000, 64, input_length=10))
        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=opt,
                      experimental_run_tf_function=False)
        x = np.random.randint(1000, size=(32, 10))
        y = np.random.random((32, 10, 64))
        # No assertions, we just need to verify that it doesn't hang
        model.train_on_batch(x, y)
    def test_from_config(self):
        # The distributed optimizer must round-trip through
        # get_config()/from_config(), both via an instance and via the class.
        opt = keras.optimizers.Adam()
        hopt = hvd.DistributedOptimizer(opt)
        cfg = hopt.get_config()
        hopt_copy1 = hopt.from_config(cfg)
        self.assertEqual(cfg, hopt_copy1.get_config())
        hopt_copy2 = hopt.__class__.from_config(cfg)
        self.assertEqual(cfg, hopt_copy2.get_config())
    def test_elastic_state(self):
        # Rank-dependent initial weights let us verify that sync()
        # broadcasts the root rank's values (all 1.0) to every worker.
        v = 1.0 if hvd.rank() == 0 else 2.0
        model1 = tf.keras.Sequential([
            tf.keras.layers.Dense(2, activation='softmax')
        ])
        model1.build((2, 2))
        model1.set_weights(
            [np.array([[v, v], [v, v]], dtype=np.float32),
             np.array([v, v], dtype=np.float32)])
        model2 = tf.keras.Sequential([
            tf.keras.layers.Dense(2, activation='softmax')
        ])
        model2.build((2, 2))
        model2.set_weights(
            [np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
             np.array([0.0, 0.0], dtype=np.float32)])
        optimizer = tf.optimizers.Adam(0.001 * hvd.size())
        state = hvd.elastic.KerasState(model1, optimizer, batch=20 + hvd.rank(), epoch=10 + hvd.rank())
        state.sync()
        model1_weights = model1.get_weights()
        model2_weights = model2.get_weights()
        # After sync, all values should match the root rank
        for w in state.model.get_weights():
            self.assertAllClose(w, np.ones_like(w))
        assert state.batch == 20
        assert state.epoch == 10
        # Partially modify then restore
        model1.set_weights(model2_weights)
        state.batch = 21
        state.epoch = 11
        state.restore()
        for w1, w2 in zip(model1.get_weights(), model1_weights):
            self.assertAllClose(w1, w2)
        assert state.batch == 20
        assert state.epoch == 10
        # Partially modify then commit
        model1.set_weights(model2_weights)
        state.batch = 21
        state.epoch = 11
        state.commit()
        state.restore()
        for w1, w2 in zip(model1.get_weights(), model2_weights):
            self.assertAllClose(w1, w2)
        assert state.batch == 21
        assert state.epoch == 11
    def test_gradient_aggregation(self):
        class TestingOptimizer(optimizer_v2.OptimizerV2):
            """
            Custom optimizer we use for testing gradient aggregation.
            """
            def get_config(self):
                config = super(TestingOptimizer, self).get_config()
                return config
            def _create_slots(self, var_list):
                # Only needed for TF < 2.2.
                pass
            def _resource_apply_dense(self, grad, var, apply_state=None):
                # Add the (already aggregated) gradient into the variable so
                # the test can read the applied value back directly.
                return var.assign_add(grad)
        backward_passes_per_step = 4
        hvd_optimizer = hvd.DistributedOptimizer(
            optimizer=TestingOptimizer("test"),
            backward_passes_per_step=backward_passes_per_step,
            average_aggregated_gradients=True,
        )
        _ = hvd_optimizer.iterations
        def compute_expected_value(batch_id):
            # Mirrors the optimizer's aggregation arithmetic: each rank
            # contributes its rank index as the gradient value.
            sum_per_aggregation = 0.0
            for _ in range(backward_passes_per_step):
                grads_for_batch = 0.0
                for rank in range(hvd.size()):
                    grads_for_batch += rank
                # Apply `average_aggregated_gradients`.
                grads_for_batch /= float(backward_passes_per_step)
                # Averages across workers.
                sum_per_aggregation += grads_for_batch / float(hvd.size())
            aggregations_completed = math.floor((batch_id + 1) / backward_passes_per_step)
            return aggregations_completed * sum_per_aggregation
        gradients = [tf.constant([float(hvd.rank())])]
        variables = [tf.Variable([0.0])]
        for idx in range(10):
            if _PRE_TF_2_2_0:
                updated_gradients = hvd_optimizer._allreduce(gradients)
                hvd_optimizer.apply_gradients(zip(updated_gradients, variables))
            elif _PRE_TF_2_4_0:
                # In 2.2 and 2.3 the horovod optimizer sets `_HAS_AGGREGATE_GRAD = True`.
                # This configures tf.keras to call `_aggregate_gradients()` outside of
                # `apply_gradients()` and to set `experimental_aggregate_gradients` to
                # False when calling `apply_gradients()` to prevent it from calling
                # `_aggregate_gradients()` again.
                updated_gradients = hvd_optimizer._aggregate_gradients(
                    zip(gradients, variables))
                hvd_optimizer.apply_gradients(
                    zip(updated_gradients, variables),
                    experimental_aggregate_gradients=False
                )
            else:
                hvd_optimizer.apply_gradients(zip(gradients, variables))
            updated_variable_value = variables[0][0].numpy()
            assert updated_variable_value == compute_expected_value(idx)
            assert idx + 1 == hvd_optimizer.iterations.numpy()
| [
"math.floor",
"horovod.tensorflow.keras.size",
"numpy.array",
"tensorflow.keras.layers.Dense",
"horovod.tensorflow.keras.init",
"horovod.tensorflow.keras.rank",
"numpy.random.random",
"warnings.simplefilter",
"tensorflow.keras.models.Sequential",
"tensorflow.Variable",
"horovod.tensorflow.keras.... | [((1029, 1057), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (1041, 1057), False, 'from distutils.version import LooseVersion\n'), ((1060, 1081), 'distutils.version.LooseVersion', 'LooseVersion', (['"""2.4.0"""'], {}), "('2.4.0')\n", (1072, 1081), False, 'from distutils.version import LooseVersion\n'), ((1098, 1126), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (1110, 1126), False, 'from distutils.version import LooseVersion\n'), ((1129, 1150), 'distutils.version.LooseVersion', 'LooseVersion', (['"""2.2.0"""'], {}), "('2.2.0')\n", (1141, 1150), False, 'from distutils.version import LooseVersion\n'), ((1470, 1501), 'warnings.simplefilter', 'warnings.simplefilter', (['"""module"""'], {}), "('module')\n", (1491, 1501), False, 'import warnings\n'), ((1510, 1520), 'horovod.tensorflow.keras.init', 'hvd.init', ([], {}), '()\n', (1518, 1520), True, 'import horovod.tensorflow.keras as hvd\n'), ((1537, 1588), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1581, 1588), True, 'import tensorflow as tf\n'), ((2031, 2068), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr_schedule'], {}), '(lr_schedule)\n', (2055, 2068), True, 'import tensorflow as tf\n'), ((2083, 2112), 'horovod.tensorflow.keras.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {}), '(opt)\n', (2107, 2112), True, 'import horovod.tensorflow.keras as hvd\n'), ((2130, 2155), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (2153, 2155), False, 'from tensorflow import keras\n'), ((2552, 2576), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (2568, 2576), True, 'import numpy as np\n'), ((2589, 2616), 'numpy.random.random', 'np.random.random', (['(1, 3, 2)'], {}), '((1, 3, 2))\n', (2605, 2616), True, 
'import numpy as np\n'), ((2966, 3001), 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (2990, 3001), False, 'from tensorflow import keras\n'), ((3016, 3067), 'horovod.tensorflow.keras.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {'sparse_as_dense': '(True)'}), '(opt, sparse_as_dense=True)\n', (3040, 3067), True, 'import horovod.tensorflow.keras as hvd\n'), ((3085, 3110), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (3108, 3110), False, 'from tensorflow import keras\n'), ((3348, 3386), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {'size': '(32, 10)'}), '(1000, size=(32, 10))\n', (3365, 3386), True, 'import numpy as np\n'), ((3399, 3429), 'numpy.random.random', 'np.random.random', (['(32, 10, 64)'], {}), '((32, 10, 64))\n', (3415, 3429), True, 'import numpy as np\n'), ((3581, 3604), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (3602, 3604), False, 'from tensorflow import keras\n'), ((3620, 3649), 'horovod.tensorflow.keras.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {}), '(opt)\n', (3644, 3649), True, 'import horovod.tensorflow.keras as hvd\n'), ((1173, 1201), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (1185, 1201), False, 'from distutils.version import LooseVersion\n'), ((1204, 1225), 'distutils.version.LooseVersion', 'LooseVersion', (['"""2.0.0"""'], {}), "('2.0.0')\n", (1216, 1225), False, 'from distutils.version import LooseVersion\n'), ((1626, 1677), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (1666, 1677), True, 'import tensorflow as tf\n'), ((2174, 2213), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(2)'], {'input_shape': '(3,)'}), '(2, input_shape=(3,))\n', (2192, 2213), False, 'from tensorflow import keras\n'), ((2233, 
2261), 'tensorflow.keras.layers.RepeatVector', 'keras.layers.RepeatVector', (['(3)'], {}), '(3)\n', (2258, 2261), False, 'from tensorflow import keras\n'), ((2281, 2314), 'tensorflow.keras.layers.ThresholdedReLU', 'keras.layers.ThresholdedReLU', (['(0.5)'], {}), '(0.5)\n', (2309, 2314), False, 'from tensorflow import keras\n'), ((2717, 2766), 'horovod.tensorflow.keras.callbacks.BroadcastGlobalVariablesCallback', 'hvd.callbacks.BroadcastGlobalVariablesCallback', (['(0)'], {}), '(0)\n', (2763, 2766), True, 'import horovod.tensorflow.keras as hvd\n'), ((3129, 3178), 'tensorflow.keras.layers.Embedding', 'keras.layers.Embedding', (['(1000)', '(64)'], {'input_length': '(10)'}), '(1000, 64, input_length=10)\n', (3151, 3178), False, 'from tensorflow import keras\n'), ((7128, 7181), 'math.floor', 'math.floor', (['((batch_id + 1) / backward_passes_per_step)'], {}), '((batch_id + 1) / backward_passes_per_step)\n', (7138, 7181), False, 'import math\n'), ((7323, 7341), 'tensorflow.Variable', 'tf.Variable', (['[0.0]'], {}), '([0.0])\n', (7334, 7341), True, 'import tensorflow as tf\n'), ((1916, 1926), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (1924, 1926), True, 'import horovod.tensorflow.keras as hvd\n'), ((3944, 3954), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (3952, 3954), True, 'import horovod.tensorflow.keras as hvd\n'), ((4020, 4066), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (4041, 4066), True, 'import tensorflow as tf\n'), ((4148, 4192), 'numpy.array', 'np.array', (['[[v, v], [v, v]]'], {'dtype': 'np.float32'}), '([[v, v], [v, v]], dtype=np.float32)\n', (4156, 4192), True, 'import numpy as np\n'), ((4208, 4242), 'numpy.array', 'np.array', (['[v, v]'], {'dtype': 'np.float32'}), '([v, v], dtype=np.float32)\n', (4216, 4242), True, 'import numpy as np\n'), ((4297, 4343), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], 
{'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (4318, 4343), True, 'import tensorflow as tf\n'), ((4425, 4477), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0]]'], {'dtype': 'np.float32'}), '([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n', (4433, 4477), True, 'import numpy as np\n'), ((4493, 4531), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0], dtype=np.float32)\n', (4501, 4531), True, 'import numpy as np\n'), ((4582, 4592), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (4590, 4592), True, 'import horovod.tensorflow.keras as hvd\n'), ((4953, 4968), 'numpy.ones_like', 'np.ones_like', (['w'], {}), '(w)\n', (4965, 4968), True, 'import numpy as np\n'), ((1755, 1771), 'horovod.tensorflow.keras.local_rank', 'hvd.local_rank', ([], {}), '()\n', (1769, 1771), True, 'import horovod.tensorflow.keras as hvd\n'), ((4664, 4674), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (4672, 4674), True, 'import horovod.tensorflow.keras as hvd\n'), ((4687, 4697), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (4695, 4697), True, 'import horovod.tensorflow.keras as hvd\n'), ((6790, 6800), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (6798, 6800), True, 'import horovod.tensorflow.keras as hvd\n'), ((7078, 7088), 'horovod.tensorflow.keras.size', 'hvd.size', ([], {}), '()\n', (7086, 7088), True, 'import horovod.tensorflow.keras as hvd\n'), ((7287, 7297), 'horovod.tensorflow.keras.rank', 'hvd.rank', ([], {}), '()\n', (7295, 7297), True, 'import horovod.tensorflow.keras as hvd\n')] |
import os
import numpy as np
import scipy.io as io
def load_data_mat(filename, max_samples, seed=42):
    """Load an SVHN-format Matlab file and draw a reproducible subsample.

    Args:
        filename: path to a .mat file with entries 'X' of shape
            [32, 32, 3, n_samples] and 'y' of shape [n_samples, 1].
        max_samples: number of samples to draw without replacement.
        seed: RNG seed controlling which samples are drawn (default 42).

    Returns:
        Tuple (X, y): X is float32 of shape [max_samples, 32, 32, 3];
        y is the flat integer label array, with Matlab's label 10
        remapped to digit 0.
    """
    raw = io.loadmat(filename)
    X = raw['X']  # Array of [32, 32, 3, n_samples]
    y = raw['y']  # Array of [n_samples, 1]
    X = np.moveaxis(X, [3], [0])
    y = y.flatten()
    # SVHN encodes digit 0 as class 10; remap it back to 0.
    y[y == 10] = 0
    # Use a private RandomState instead of np.random.seed so we don't
    # clobber the caller's global RNG state. Seeding a RandomState with
    # the same seed draws the identical subsample as the old behavior.
    rng = np.random.RandomState(seed)
    samples = rng.choice(np.arange(X.shape[0]),
                         max_samples,
                         replace=False)
    return X[samples].astype(np.float32), y[samples]
def load_svhn(folder, max_train, max_test):
    """Load the SVHN train and test splits stored under *folder*.

    Args:
        folder: directory containing train_32x32.mat and test_32x32.mat.
        max_train: number of training samples to subsample.
        max_test: number of test samples to subsample.

    Returns:
        Tuple (train_X, train_y, test_X, test_y).
    """
    train_path = os.path.join(folder, "train_32x32.mat")
    test_path = os.path.join(folder, "test_32x32.mat")
    train_X, train_y = load_data_mat(train_path, max_train)
    test_X, test_y = load_data_mat(test_path, max_test)
    return train_X, train_y, test_X, test_y
def random_split_train_val(X, y, num_val, seed=42):
    """Shuffle (X, y) in unison and split off the last num_val samples.

    Args:
        X: array of samples; the first axis indexes samples.
        y: array of labels aligned with X.
        num_val: number of samples to reserve for the validation split.
        seed: RNG seed controlling the reproducible shuffle (default 42).

    Returns:
        Tuple (train_X, train_y, val_X, val_y).
    """
    # Use a private RandomState instead of np.random.seed so the caller's
    # global RNG state is untouched; the resulting permutation is identical
    # to the old global-seed behavior.
    rng = np.random.RandomState(seed)
    indices = np.arange(X.shape[0])
    rng.shuffle(indices)
    # Split at an explicit index: the previous `indices[:-num_val]` slice
    # wrongly produced an EMPTY training set when num_val == 0.
    split = X.shape[0] - num_val
    train_indices = indices[:split]
    train_X = X[train_indices]
    train_y = y[train_indices]
    val_indices = indices[split:]
    val_X = X[val_indices]
    val_y = y[val_indices]
    return train_X, train_y, val_X, val_y
| [
"scipy.io.loadmat",
"os.path.join",
"numpy.random.seed",
"numpy.moveaxis",
"numpy.arange",
"numpy.random.shuffle"
] | [((116, 136), 'scipy.io.loadmat', 'io.loadmat', (['filename'], {}), '(filename)\n', (126, 136), True, 'import scipy.io as io\n'), ((241, 265), 'numpy.moveaxis', 'np.moveaxis', (['X', '[3]', '[0]'], {}), '(X, [3], [0])\n', (252, 265), True, 'import numpy as np\n'), ((343, 363), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (357, 363), True, 'import numpy as np\n'), ((888, 908), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (902, 908), True, 'import numpy as np\n'), ((924, 945), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (933, 945), True, 'import numpy as np\n'), ((950, 976), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (967, 976), True, 'import numpy as np\n'), ((395, 416), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (404, 416), True, 'import numpy as np\n'), ((649, 688), 'os.path.join', 'os.path.join', (['folder', '"""train_32x32.mat"""'], {}), "(folder, 'train_32x32.mat')\n", (661, 688), False, 'import os\n'), ((736, 774), 'os.path.join', 'os.path.join', (['folder', '"""test_32x32.mat"""'], {}), "(folder, 'test_32x32.mat')\n", (748, 774), False, 'import os\n')] |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains a deep Bayesian neural net to classify MNIST digits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from absl import flags
import matplotlib
# Force the non-interactive Agg backend; this must run before any
# pyplot-style imports so figures can be rendered headlessly.
matplotlib.use("Agg")
from matplotlib import figure  # pylint: disable=g-import-not-at-top
from matplotlib.backends import backend_agg
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.contrib.learn.python.learn.datasets import mnist
# TODO(b/78137893): Integration tests currently fail with seaborn imports.
# Seaborn is optional: visualization below is skipped when it is missing.
try:
  import seaborn as sns  # pylint: disable=g-import-not-at-top
  HAS_SEABORN = True
except ImportError:
  HAS_SEABORN = False
# Shorthand for the contrib distributions module; MNIST images are 28x28.
tfd = tf.contrib.distributions
IMAGE_SHAPE = [28, 28]
# Command-line flags controlling the training hyperparameters, data/model
# directories and visualization cadence; values are read through the global
# `FLAGS` object defined at the end of this section.
flags.DEFINE_float("learning_rate",
                   default=0.01,
                   help="Initial learning rate.")
flags.DEFINE_integer("max_steps",
                     default=6000,
                     help="Number of training steps to run.")
flags.DEFINE_list("layer_sizes",
                  default=["128", "128"],
                  help="Comma-separated list denoting hidden units per layer.")
flags.DEFINE_string("activation",
                    default="relu",
                    help="Activation function for all hidden layers.")
flags.DEFINE_integer("batch_size",
                     default=128,
                     help="Batch size. Must divide evenly into dataset sizes.")
flags.DEFINE_string("data_dir",
                    default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
                                         "bayesian_neural_network/data"),
                    help="Directory where data is stored (if using real data).")
flags.DEFINE_string(
    "model_dir",
    default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
                         "bayesian_neural_network/"),
    help="Directory to put the model's fit.")
flags.DEFINE_integer("viz_steps",
                     default=400,
                     help="Frequency at which save visualizations.")
flags.DEFINE_integer("num_monte_carlo",
                     default=50,
                     help="Network draws to compute predictive probabilities.")
flags.DEFINE_bool("fake_data",
                  default=None,
                  help="If true, uses fake data. Defaults to real data.")
FLAGS = flags.FLAGS
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
  """Save a PNG with side-by-side histograms of weight means and stddevs.

  The left panel shows one distribution per named variable for the
  posterior means; the right panel does the same for the posterior
  standard deviations.

  Args:
    names: A Python `iterable` of `str` variable names.
    qm_vals: A Python `iterable`, the same length as `names`, of Numpy
      `array`s holding posterior means of the weight variables.
    qs_vals: A Python `iterable`, the same length as `names`, of Numpy
      `array`s holding posterior standard deviations of the weights.
    fname: Python `str` filename to save the plot to.
  """
  fig = figure.Figure(figsize=(6, 3))
  canvas = backend_agg.FigureCanvasAgg(fig)
  # Left panel: posterior means, one labeled histogram per variable.
  mean_ax = fig.add_subplot(1, 2, 1)
  for label, means in zip(names, qm_vals):
    sns.distplot(means.flatten(), ax=mean_ax, label=label)
  mean_ax.set_title("weight means")
  mean_ax.set_xlim([-1.5, 1.5])
  mean_ax.set_ylim([0, 4.])
  mean_ax.legend()
  # Right panel: posterior standard deviations (no legend needed).
  std_ax = fig.add_subplot(1, 2, 2)
  for _, stddevs in zip(names, qs_vals):
    sns.distplot(stddevs.flatten(), ax=std_ax)
  std_ax.set_title("weight stddevs")
  std_ax.set_xlim([0, 1.])
  std_ax.set_ylim([0, 25.])
  fig.tight_layout()
  canvas.print_figure(fname, format="png")
  print("saved {}".format(fname))
def plot_heldout_prediction(input_vals, probs,
                            fname, n=10, title=""):
  """Save a PNG visualizing posterior uncertainty on heldout data.

  Each of the `n` rows shows, left to right: the input image, a bar plot
  per Monte Carlo draw of the class probabilities, and the mean
  predictive distribution over classes.

  Args:
    input_vals: A `float`-like Numpy `array` of shape
      `[num_heldout] + IMAGE_SHAPE`, containing heldout input images.
    probs: A `float`-like Numpy array of shape `[num_monte_carlo,
      num_heldout, num_classes]` containing Monte Carlo samples of
      class probabilities for each heldout sample.
    fname: Python `str` filename to save the plot to.
    n: Python `int` number of datapoints to vizualize.
    title: Python `str` title for the plot.
  """
  fig = figure.Figure(figsize=(9, 3*n))
  canvas = backend_agg.FigureCanvasAgg(fig)
  classes = np.arange(10)
  for row in range(n):
    # Column 1: the raw input image.
    image_ax = fig.add_subplot(n, 3, 3*row + 1)
    image_ax.imshow(input_vals[row, :].reshape(IMAGE_SHAPE), interpolation="None")
    # Column 2: overlay one translucent bar plot per posterior draw.
    sample_ax = fig.add_subplot(n, 3, 3*row + 2)
    for draw in probs:
      sns.barplot(classes, draw[row, :], alpha=0.1, ax=sample_ax)
      sample_ax.set_ylim([0, 1])
    sample_ax.set_title("posterior samples")
    # Column 3: the Monte Carlo mean of the class probabilities.
    mean_ax = fig.add_subplot(n, 3, 3*row + 3)
    sns.barplot(classes, np.mean(probs[:, row, :], axis=0), ax=mean_ax)
    mean_ax.set_ylim([0, 1])
    mean_ax.set_title("predictive probs")
  fig.suptitle(title)
  fig.tight_layout()
  canvas.print_figure(fname, format="png")
  print("saved {}".format(fname))
def build_input_pipeline(mnist_data, batch_size, heldout_size):
  """Build an Iterator switching between train and heldout data.

  Args:
    mnist_data: dataset object with `train` and `validation` splits, each
      exposing `images` and `labels` arrays.
    batch_size: Python `int` batch size for the training iterator.
    heldout_size: Python `int` number of heldout examples; the whole
      heldout set is emitted as one repeated batch of this size.

  Returns:
    A tuple `(images, labels, handle, training_iterator,
    heldout_iterator)`, where `handle` is a string placeholder that
    selects which iterator feeds `images`/`labels` at run time.
  """
  # Build an iterator over training batches.
  training_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist_data.train.images, np.int32(mnist_data.train.labels)))
  training_batches = training_dataset.repeat().batch(batch_size)
  training_iterator = training_batches.make_one_shot_iterator()
  # Build a iterator over the heldout set with batch_size=heldout_size,
  # i.e., return the entire heldout set as a constant.
  heldout_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist_data.validation.images,
       np.int32(mnist_data.validation.labels)))
  heldout_frozen = (heldout_dataset.take(heldout_size).
                    repeat().batch(heldout_size))
  heldout_iterator = heldout_frozen.make_one_shot_iterator()
  # Combine these into a feedable iterator that can switch between training
  # and validation inputs.
  handle = tf.placeholder(tf.string, shape=[])
  feedable_iterator = tf.data.Iterator.from_string_handle(
      handle, training_batches.output_types, training_batches.output_shapes)
  images, labels = feedable_iterator.get_next()
  return images, labels, handle, training_iterator, heldout_iterator
def build_fake_data(num_examples=10):
  """Build fake MNIST-style data for unit testing.

  Args:
    num_examples: Python `int` number of fake examples to generate for
      each of the train and validation splits (default 10).

  Returns:
    An object mimicking `mnist.read_data_sets` output, with `train` and
    `validation` attributes each carrying random Gaussian `images`
    (shape `[num_examples, prod(IMAGE_SHAPE)]`), permuted integer
    `labels`, and a `num_examples` count.
  """
  class Dummy(object):
    pass
  # BUG FIX: the body previously reassigned `num_examples = 10`
  # unconditionally, silently discarding the caller's argument.
  mnist_data = Dummy()
  mnist_data.train = Dummy()
  mnist_data.train.images = np.float32(np.random.randn(
      num_examples, np.prod(IMAGE_SHAPE)))
  mnist_data.train.labels = np.int32(np.random.permutation(
      np.arange(num_examples)))
  mnist_data.train.num_examples = num_examples
  mnist_data.validation = Dummy()
  mnist_data.validation.images = np.float32(np.random.randn(
      num_examples, np.prod(IMAGE_SHAPE)))
  mnist_data.validation.labels = np.int32(np.random.permutation(
      np.arange(num_examples)))
  mnist_data.validation.num_examples = num_examples
  return mnist_data
def main(argv):
  """Train the Bayesian neural net on MNIST and periodically visualize it.

  Args:
    argv: unused positional command-line arguments; configuration comes
      from the module-level `FLAGS`.
  """
  del argv  # unused
  FLAGS.layer_sizes = [int(units) for units in FLAGS.layer_sizes]
  FLAGS.activation = getattr(tf.nn, FLAGS.activation)
  # Start from a clean model directory so stale output does not mix with
  # this run's plots.
  if tf.gfile.Exists(FLAGS.model_dir):
    tf.logging.warning(
        "Warning: deleting old log directory at {}".format(FLAGS.model_dir))
    tf.gfile.DeleteRecursively(FLAGS.model_dir)
  tf.gfile.MakeDirs(FLAGS.model_dir)
  if FLAGS.fake_data:
    mnist_data = build_fake_data()
  else:
    mnist_data = mnist.read_data_sets(FLAGS.data_dir)
  with tf.Graph().as_default():
    (images, labels, handle,
     training_iterator, heldout_iterator) = build_input_pipeline(
         mnist_data, FLAGS.batch_size, mnist_data.validation.num_examples)
    # Build a Bayesian neural net. We use the Flipout Monte Carlo estimator for
    # each layer: this enables lower variance stochastic gradients than naive
    # reparameterization.
    with tf.name_scope("bayesian_neural_net", values=[images]):
      neural_net = tf.keras.Sequential()
      for units in FLAGS.layer_sizes:
        layer = tfp.layers.DenseFlipout(
            units,
            activation=FLAGS.activation)
        neural_net.add(layer)
      neural_net.add(tfp.layers.DenseFlipout(10))
      logits = neural_net(images)
      labels_distribution = tfd.Categorical(logits=logits)
    # Compute the -ELBO as the loss, averaged over the batch size.
    neg_log_likelihood = -tf.reduce_mean(labels_distribution.log_prob(labels))
    kl = sum(neural_net.losses) / mnist_data.train.num_examples
    elbo_loss = neg_log_likelihood + kl
    # Build metrics for evaluation. Predictions are formed from a single forward
    # pass of the probabilistic layers. They are cheap but noisy predictions.
    predictions = tf.argmax(logits, axis=1)
    accuracy, accuracy_update_op = tf.metrics.accuracy(
        labels=labels, predictions=predictions)
    # Extract weight posterior statistics for later visualization.
    names = []
    qmeans = []
    qstds = []
    for i, layer in enumerate(neural_net.layers):
      q = layer.kernel_posterior
      names.append("Layer {}".format(i))
      qmeans.append(q.mean())
      qstds.append(q.stddev())
    with tf.name_scope("train"):
      opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
      train_op = opt.minimize(elbo_loss)
      sess = tf.Session()
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      # Run the training loop.
      train_handle = sess.run(training_iterator.string_handle())
      heldout_handle = sess.run(heldout_iterator.string_handle())
      for step in range(FLAGS.max_steps):
        _ = sess.run([train_op, accuracy_update_op],
                     feed_dict={handle: train_handle})
        if step % 100 == 0:
          loss_value, accuracy_value = sess.run(
              [elbo_loss, accuracy], feed_dict={handle: train_handle})
          print("Step: {:>3d} Loss: {:.3f} Accuracy: {:.3f}".format(
              step, loss_value, accuracy_value))
        if (step+1) % FLAGS.viz_steps == 0:
          # Compute log prob of heldout set by averaging draws from the model:
          # p(heldout | train) = int_model p(heldout|model) p(model|train)
          #                   ~= 1/n * sum_{i=1}^n p(heldout | model_i)
          # where model_i is a draw from the posterior p(model|train).
          probs = np.asarray([sess.run((labels_distribution.probs),
                                feed_dict={handle: heldout_handle})
                              for _ in range(FLAGS.num_monte_carlo)])
          mean_probs = np.mean(probs, axis=0)
          image_vals, label_vals = sess.run((images, labels),
                                            feed_dict={handle: heldout_handle})
          heldout_lp = np.mean(np.log(mean_probs[np.arange(mean_probs.shape[0]),
                                            label_vals.flatten()]))
          print(" ... Held-out nats: {:.3f}".format(heldout_lp))
          qm_vals, qs_vals = sess.run((qmeans, qstds))
          if HAS_SEABORN:
            # Visualization requires seaborn; skipped when unavailable.
            plot_weight_posteriors(names, qm_vals, qs_vals,
                                   fname=os.path.join(
                                       FLAGS.model_dir,
                                       "step{:05d}_weights.png".format(step)))
            plot_heldout_prediction(image_vals, probs,
                                    fname=os.path.join(
                                        FLAGS.model_dir,
                                        "step{:05d}_pred.png".format(step)),
                                    title="mean heldout logprob {:.2f}"
                                          .format(heldout_lp))
# `tf.app.run` parses the absl flags and then invokes `main(argv)`.
if __name__ == "__main__":
  tf.app.run()
| [
"tensorflow.local_variables_initializer",
"numpy.prod",
"numpy.int32",
"tensorflow.gfile.MakeDirs",
"absl.flags.DEFINE_float",
"numpy.arange",
"absl.flags.DEFINE_list",
"tensorflow.app.run",
"numpy.mean",
"tensorflow.Graph",
"tensorflow.gfile.Exists",
"tensorflow.keras.Sequential",
"tensorfl... | [((928, 949), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (942, 949), False, 'import matplotlib\n'), ((1473, 1558), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.01)', 'help': '"""Initial learning rate."""'}), "('learning_rate', default=0.01, help='Initial learning rate.'\n )\n", (1491, 1558), False, 'from absl import flags\n'), ((1592, 1685), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_steps"""'], {'default': '(6000)', 'help': '"""Number of training steps to run."""'}), "('max_steps', default=6000, help=\n 'Number of training steps to run.')\n", (1612, 1685), False, 'from absl import flags\n'), ((1723, 1846), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""layer_sizes"""'], {'default': "['128', '128']", 'help': '"""Comma-separated list denoting hidden units per layer."""'}), "('layer_sizes', default=['128', '128'], help=\n 'Comma-separated list denoting hidden units per layer.')\n", (1740, 1846), False, 'from absl import flags\n'), ((1878, 1983), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""activation"""'], {'default': '"""relu"""', 'help': '"""Activation function for all hidden layers."""'}), "('activation', default='relu', help=\n 'Activation function for all hidden layers.')\n", (1897, 1983), False, 'from absl import flags\n'), ((2019, 2130), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""'], {'default': '(128)', 'help': '"""Batch size. Must divide evenly into dataset sizes."""'}), "('batch_size', default=128, help=\n 'Batch size. 
Must divide evenly into dataset sizes.')\n", (2039, 2130), False, 'from absl import flags\n'), ((2627, 2726), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""viz_steps"""'], {'default': '(400)', 'help': '"""Frequency at which save visualizations."""'}), "('viz_steps', default=400, help=\n 'Frequency at which save visualizations.')\n", (2647, 2726), False, 'from absl import flags\n'), ((2764, 2879), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_monte_carlo"""'], {'default': '(50)', 'help': '"""Network draws to compute predictive probabilities."""'}), "('num_monte_carlo', default=50, help=\n 'Network draws to compute predictive probabilities.')\n", (2784, 2879), False, 'from absl import flags\n'), ((2917, 3022), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""fake_data"""'], {'default': 'None', 'help': '"""If true, uses fake data. Defaults to real data."""'}), "('fake_data', default=None, help=\n 'If true, uses fake data. Defaults to real data.')\n", (2934, 3022), False, 'from absl import flags\n'), ((3690, 3719), 'matplotlib.figure.Figure', 'figure.Figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (3703, 3719), False, 'from matplotlib import figure\n'), ((3731, 3763), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'backend_agg.FigureCanvasAgg', (['fig'], {}), '(fig)\n', (3758, 3763), False, 'from matplotlib.backends import backend_agg\n'), ((4918, 4951), 'matplotlib.figure.Figure', 'figure.Figure', ([], {'figsize': '(9, 3 * n)'}), '(figsize=(9, 3 * n))\n', (4931, 4951), False, 'from matplotlib import figure\n'), ((4961, 4993), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'backend_agg.FigureCanvasAgg', (['fig'], {}), '(fig)\n', (4988, 4993), False, 'from matplotlib.backends import backend_agg\n'), ((6613, 6648), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'shape': '[]'}), '(tf.string, shape=[])\n', (6627, 6648), True, 'import tensorflow as tf\n'), ((6671, 6781), 
'tensorflow.data.Iterator.from_string_handle', 'tf.data.Iterator.from_string_handle', (['handle', 'training_batches.output_types', 'training_batches.output_shapes'], {}), '(handle, training_batches.output_types,\n training_batches.output_shapes)\n', (6706, 6781), True, 'import tensorflow as tf\n'), ((7812, 7844), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (7827, 7844), True, 'import tensorflow as tf\n'), ((7997, 8031), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (8014, 8031), True, 'import tensorflow as tf\n'), ((12367, 12379), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (12377, 12379), True, 'import tensorflow as tf\n'), ((7951, 7994), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.model_dir'], {}), '(FLAGS.model_dir)\n', (7977, 7994), True, 'import tensorflow as tf\n'), ((8115, 8151), 'tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets', 'mnist.read_data_sets', (['FLAGS.data_dir'], {}), '(FLAGS.data_dir)\n', (8135, 8151), False, 'from tensorflow.contrib.learn.python.learn.datasets import mnist\n'), ((9386, 9411), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (9395, 9411), True, 'import tensorflow as tf\n'), ((9447, 9506), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'labels', 'predictions': 'predictions'}), '(labels=labels, predictions=predictions)\n', (9466, 9506), True, 'import tensorflow as tf\n'), ((2241, 2273), 'os.getenv', 'os.getenv', (['"""TEST_TMPDIR"""', '"""/tmp"""'], {}), "('TEST_TMPDIR', '/tmp')\n", (2250, 2273), False, 'import os\n'), ((2493, 2525), 'os.getenv', 'os.getenv', (['"""TEST_TMPDIR"""', '"""/tmp"""'], {}), "('TEST_TMPDIR', '/tmp')\n", (2502, 2525), False, 'import os\n'), ((5392, 5405), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5401, 5405), True, 'import numpy as np\n'), ((5407, 5438), 
'numpy.mean', 'np.mean', (['probs[:, i, :]'], {'axis': '(0)'}), '(probs[:, i, :], axis=0)\n', (5414, 5438), True, 'import numpy as np\n'), ((5897, 5930), 'numpy.int32', 'np.int32', (['mnist_data.train.labels'], {}), '(mnist_data.train.labels)\n', (5905, 5930), True, 'import numpy as np\n'), ((6290, 6328), 'numpy.int32', 'np.int32', (['mnist_data.validation.labels'], {}), '(mnist_data.validation.labels)\n', (6298, 6328), True, 'import numpy as np\n'), ((7179, 7199), 'numpy.prod', 'np.prod', (['IMAGE_SHAPE'], {}), '(IMAGE_SHAPE)\n', (7186, 7199), True, 'import numpy as np\n'), ((7268, 7291), 'numpy.arange', 'np.arange', (['num_examples'], {}), '(num_examples)\n', (7277, 7291), True, 'import numpy as np\n'), ((7456, 7476), 'numpy.prod', 'np.prod', (['IMAGE_SHAPE'], {}), '(IMAGE_SHAPE)\n', (7463, 7476), True, 'import numpy as np\n'), ((7550, 7573), 'numpy.arange', 'np.arange', (['num_examples'], {}), '(num_examples)\n', (7559, 7573), True, 'import numpy as np\n'), ((8549, 8602), 'tensorflow.name_scope', 'tf.name_scope', (['"""bayesian_neural_net"""'], {'values': '[images]'}), "('bayesian_neural_net', values=[images])\n", (8562, 8602), True, 'import tensorflow as tf\n'), ((8623, 8644), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (8642, 8644), True, 'import tensorflow as tf\n'), ((9825, 9847), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (9838, 9847), True, 'import tensorflow as tf\n'), ((9861, 9918), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'FLAGS.learning_rate'}), '(learning_rate=FLAGS.learning_rate)\n', (9883, 9918), True, 'import tensorflow as tf\n'), ((9974, 9986), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9984, 9986), True, 'import tensorflow as tf\n'), ((5219, 5232), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5228, 5232), True, 'import numpy as np\n'), ((8160, 8170), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8168, 8170), True, 
'import tensorflow as tf\n'), ((8699, 8758), 'tensorflow_probability.layers.DenseFlipout', 'tfp.layers.DenseFlipout', (['units'], {'activation': 'FLAGS.activation'}), '(units, activation=FLAGS.activation)\n', (8722, 8758), True, 'import tensorflow_probability as tfp\n'), ((8835, 8862), 'tensorflow_probability.layers.DenseFlipout', 'tfp.layers.DenseFlipout', (['(10)'], {}), '(10)\n', (8858, 8862), True, 'import tensorflow_probability as tfp\n'), ((10002, 10035), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10033, 10035), True, 'import tensorflow as tf\n'), ((10052, 10084), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (10082, 10084), True, 'import tensorflow as tf\n'), ((11244, 11266), 'numpy.mean', 'np.mean', (['probs'], {'axis': '(0)'}), '(probs, axis=0)\n', (11251, 11266), True, 'import numpy as np\n'), ((11459, 11489), 'numpy.arange', 'np.arange', (['mean_probs.shape[0]'], {}), '(mean_probs.shape[0])\n', (11468, 11489), True, 'import numpy as np\n')] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas
from compas.datastructures import Mesh
from compas.utilities import flatten
from numpy import array
# load a quad mesh bundled with compas and collect its vertex coordinates
mesh = Mesh.from_obj(compas.get('faces.obj'))
xyz = mesh.get_vertices_attributes('xyz')
# flatten the list of nested xyz coordinates
# [[x, y, z], [x, y, z], ...] => [x, y, z, x, y, z, ...]
# variant 1: explicit loop with one append per coordinate
xyz_1 = []
for x, y, z in xyz:
    xyz_1.append(x)
    xyz_1.append(y)
    xyz_1.append(z)
# variant 2: extend with each point (equivalent to +=)
xyz_2 = []
for point in xyz:
    xyz_2.extend(point)  # xyz_2 += point
# variant 3: nested list comprehension
xyz_3 = [axis for point in xyz for axis in point]
# variant 4: compas utility generator, materialized into a list
xyz_4 = list(flatten(xyz))
# variant 5: numpy flatten, converted back to a plain python list
xyz_5 = array(xyz).flatten().tolist()
# all five variants print the same first three coordinates
print(xyz_1[0:3])
print(xyz_2[0:3])
print(xyz_3[0:3])
print(xyz_4[0:3])
print(xyz_5[0:3])
# get the x, y, z column vectors of the nx3 matrix xyz
# [[x, y, z], [x, y, z], ...] => [x, x, ...], [y, y, ...], [z, z, ...]
# variant 1: explicit loop appending to three separate lists
X_1 = []
Y_1 = []
Z_1 = []
for x, y, z in xyz:
    X_1.append(x)
    Y_1.append(y)
    Z_1.append(z)
# variant 2: one comprehension per column
X_2 = [x for x, _, _ in xyz]
Y_2 = [y for _, y, _ in xyz]
Z_2 = [z for _, _, z in xyz]
# variant 3: transpose via zip star-unpacking (columns come out as tuples)
X_3, Y_3, Z_3 = list(zip(*xyz))
# variant 4: numpy transpose
X_4, Y_4, Z_4 = array(xyz).T.tolist()
print(X_1[0:3], Y_1[0:3], Z_1[0:3])
print(X_2[0:3], Y_2[0:3], Z_2[0:3])
print(X_3[0:3], Y_3[0:3], Z_3[0:3])
print(X_4[0:3], Y_4[0:3], Z_4[0:3])
# count the number of unique x, y, z coordinates up to 3-digit precision
| [
"compas.get",
"numpy.array",
"compas.utilities.flatten"
] | [((248, 271), 'compas.get', 'compas.get', (['"""faces.obj"""'], {}), "('faces.obj')\n", (258, 271), False, 'import compas\n'), ((651, 663), 'compas.utilities.flatten', 'flatten', (['xyz'], {}), '(xyz)\n', (658, 663), False, 'from compas.utilities import flatten\n'), ((1165, 1175), 'numpy.array', 'array', (['xyz'], {}), '(xyz)\n', (1170, 1175), False, 'from numpy import array\n'), ((674, 684), 'numpy.array', 'array', (['xyz'], {}), '(xyz)\n', (679, 684), False, 'from numpy import array\n')] |
from argparse import ArgumentParser
from typing import Dict, List
from unittest import mock
from unittest.mock import call, patch
import networkx as nx
import numpy as np
import torch
from cogdl.data import Graph
from cogdl.models.emb.deepwalk import DeepWalk
class Word2VecFake:
    """Minimal stand-in for gensim's Word2Vec exposing only the ``wv`` mapping."""

    def __init__(self, data: Dict[str, List[float]]) -> None:
        """Store *data* as the fake node-id -> embedding lookup table."""
        self.wv = data
# fixed 4-dimensional embeddings the fake trainer hands back for nodes "0", "1", "2"
embed_1 = [-0.1, 0.3, 0.5, 0.7]
embed_2 = [0.2, 0.4, 0.6, -0.8]
embed_3 = [0.3, 0.2, 0.1, -0.1]
def creator(walks, size, window, min_count, sg, workers, iter):
    # Word2Vec-compatible factory stub: ignores the generated walks and returns
    # a fake model with predetermined embeddings (signature mirrors gensim.Word2Vec)
    return Word2VecFake({"0": embed_1, "1": embed_2, "2": embed_3})
class Args:
    # bag of hyper-parameters mimicking the argparse namespace that
    # DeepWalk.build_model_from_args expects (attributes set in get_args)
    hidden_size: int
    walk_length: int
    walk_num: int
    window_size: int
    worker: int
    iteration: int
def get_args():
    """Return an Args instance populated with the default test hyper-parameters."""
    defaults = {
        "hidden_size": 4,
        "walk_length": 5,
        "walk_num": 3,
        "window_size": 2,
        "worker": 777,
        "iteration": 10,
    }
    args = Args()
    for field, value in defaults.items():
        setattr(args, field, value)
    return args
def test_adds_correct_args():
    """DeepWalk.add_args must register every expected integer CLI option."""
    option_names = ["walk-length", "walk-num", "window-size", "worker", "iteration"]
    expected_calls = [
        call(f"--{name}", type=int, default=mock.ANY, help=mock.ANY)
        for name in option_names
    ]
    parser = ArgumentParser()
    with patch.object(parser, "add_argument", return_value=None) as add_argument_mock:
        DeepWalk.add_args(parser)
    add_argument_mock.assert_has_calls(expected_calls)
def test_correctly_builds():
    """build_model_from_args must copy every hyper-parameter onto the model."""
    args = get_args()
    model = DeepWalk.build_model_from_args(args)
    expected = {
        "dimension": args.hidden_size,
        "walk_length": args.walk_length,
        "walk_num": args.walk_num,
        "window_size": args.window_size,
        "worker": args.worker,
        "iteration": args.iteration,
    }
    for attr, value in expected.items():
        assert getattr(model, attr) == value
def test_will_return_computed_embeddings_for_simple_fully_connected_graph():
    """A two-node graph should yield exactly the fake trainer's embeddings."""
    model: DeepWalk = DeepWalk.build_model_from_args(get_args())
    edge_index = (torch.LongTensor([0]), torch.LongTensor([1]))
    trained = model(Graph(edge_index=edge_index), creator)
    assert len(trained) == 2
    for node, expected in ((0, embed_1), (1, embed_2)):
        np.testing.assert_array_equal(trained[node], expected)
def test_will_return_computed_embeddings_for_simple_graph():
    """A three-node path graph should map every node to its fake embedding."""
    model: DeepWalk = DeepWalk.build_model_from_args(get_args())
    edge_index = (torch.LongTensor([0, 1]), torch.LongTensor([1, 2]))
    trained = model(Graph(edge_index=edge_index), creator)
    assert len(trained) == 3
    for node, expected in enumerate((embed_1, embed_2, embed_3)):
        np.testing.assert_array_equal(trained[node], expected)
def test_will_pass_correct_number_of_walks():
    """The trainer must receive walk_num random walks per graph node."""
    args = get_args()
    args.walk_num = 2
    model: DeepWalk = DeepWalk.build_model_from_args(args)
    graph = Graph(edge_index=(torch.LongTensor([0, 1]), torch.LongTensor([1, 2])))
    observed_walk_counts = []

    def spying_creator(walks, size, window, min_count, sg, workers, iter):
        # record how many walks DeepWalk generated before delegating
        observed_walk_counts.append(len(walks))
        return creator(walks, size, window, min_count, sg, workers, iter)

    model(graph, spying_creator)
    assert observed_walk_counts[0] == args.walk_num * graph.num_nodes
if __name__ == "__main__":
    # run the full suite when invoked directly (outside a pytest runner)
    for test_fn in (
        test_adds_correct_args,
        test_correctly_builds,
        test_will_return_computed_embeddings_for_simple_fully_connected_graph,
        test_will_return_computed_embeddings_for_simple_graph,
        test_will_pass_correct_number_of_walks,
    ):
        test_fn()
| [
"argparse.ArgumentParser",
"torch.LongTensor",
"unittest.mock.call",
"cogdl.models.emb.deepwalk.DeepWalk.build_model_from_args",
"cogdl.models.emb.deepwalk.DeepWalk.add_args",
"unittest.mock.patch.object",
"numpy.testing.assert_array_equal"
] | [((1164, 1180), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1178, 1180), False, 'from argparse import ArgumentParser\n'), ((1419, 1455), 'cogdl.models.emb.deepwalk.DeepWalk.build_model_from_args', 'DeepWalk.build_model_from_args', (['args'], {}), '(args)\n', (1449, 1455), False, 'from cogdl.models.emb.deepwalk import DeepWalk\n'), ((1851, 1887), 'cogdl.models.emb.deepwalk.DeepWalk.build_model_from_args', 'DeepWalk.build_model_from_args', (['args'], {}), '(args)\n', (1881, 1887), False, 'from cogdl.models.emb.deepwalk import DeepWalk\n'), ((2034, 2084), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trained[0]', 'embed_1'], {}), '(trained[0], embed_1)\n', (2063, 2084), True, 'import numpy as np\n'), ((2089, 2139), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trained[1]', 'embed_2'], {}), '(trained[1], embed_2)\n', (2118, 2139), True, 'import numpy as np\n'), ((2247, 2283), 'cogdl.models.emb.deepwalk.DeepWalk.build_model_from_args', 'DeepWalk.build_model_from_args', (['args'], {}), '(args)\n', (2277, 2283), False, 'from cogdl.models.emb.deepwalk import DeepWalk\n'), ((2436, 2486), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trained[0]', 'embed_1'], {}), '(trained[0], embed_1)\n', (2465, 2486), True, 'import numpy as np\n'), ((2491, 2541), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trained[1]', 'embed_2'], {}), '(trained[1], embed_2)\n', (2520, 2541), True, 'import numpy as np\n'), ((2546, 2596), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trained[2]', 'embed_3'], {}), '(trained[2], embed_3)\n', (2575, 2596), True, 'import numpy as np\n'), ((2711, 2747), 'cogdl.models.emb.deepwalk.DeepWalk.build_model_from_args', 'DeepWalk.build_model_from_args', (['args'], {}), '(args)\n', (2741, 2747), False, 'from cogdl.models.emb.deepwalk import DeepWalk\n'), ((1067, 1124), 'unittest.mock.call', 'call', (['f"""--{x}"""'], 
{'type': 'int', 'default': 'mock.ANY', 'help': 'mock.ANY'}), "(f'--{x}', type=int, default=mock.ANY, help=mock.ANY)\n", (1071, 1124), False, 'from unittest.mock import call, patch\n'), ((1190, 1245), 'unittest.mock.patch.object', 'patch.object', (['parser', '"""add_argument"""'], {'return_value': 'None'}), "(parser, 'add_argument', return_value=None)\n", (1202, 1245), False, 'from unittest.mock import call, patch\n'), ((1272, 1297), 'cogdl.models.emb.deepwalk.DeepWalk.add_args', 'DeepWalk.add_args', (['parser'], {}), '(parser)\n', (1289, 1297), False, 'from cogdl.models.emb.deepwalk import DeepWalk\n'), ((1918, 1939), 'torch.LongTensor', 'torch.LongTensor', (['[0]'], {}), '([0])\n', (1934, 1939), False, 'import torch\n'), ((1941, 1962), 'torch.LongTensor', 'torch.LongTensor', (['[1]'], {}), '([1])\n', (1957, 1962), False, 'import torch\n'), ((2314, 2338), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1]'], {}), '([0, 1])\n', (2330, 2338), False, 'import torch\n'), ((2340, 2364), 'torch.LongTensor', 'torch.LongTensor', (['[1, 2]'], {}), '([1, 2])\n', (2356, 2364), False, 'import torch\n'), ((2778, 2802), 'torch.LongTensor', 'torch.LongTensor', (['[0, 1]'], {}), '([0, 1])\n', (2794, 2802), False, 'import torch\n'), ((2804, 2828), 'torch.LongTensor', 'torch.LongTensor', (['[1, 2]'], {}), '([1, 2])\n', (2820, 2828), False, 'import torch\n')] |
'''
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
'''
import torch
from torch.nn import functional as F
import os
import sys
import copy
import argparse
from tqdm import tqdm
import pickle
from pytorch_transformers import *
import numpy as np
import random
from wsd_models.util import *
#command line interface: training hyper-parameters, checkpoint location and
#evaluation options for the frozen-encoder WSD probe
parser = argparse.ArgumentParser(description='BERT Frozen Probing Model for WSD')
parser.add_argument('--rand_seed', type=int, default=42)
parser.add_argument('--silent', action='store_true',
help='Flag to supress training progress bar for each epoch')
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--bsz', type=int, default=128)
parser.add_argument('--ckpt', type=str, required=True,
help='filepath at which to save best probing model (on dev set)')
parser.add_argument('--encoder-name', type=str, default='bert-base',
choices=['bert-base', 'bert-large', 'roberta-base', 'roberta-large'])
parser.add_argument('--kshot', type=int, default=-1,
help='if set to k (1+), will filter training data to only have up to k examples per sense')
parser.add_argument('--data-path', type=str, required=True,
help='Location of top-level directory for the Unified WSD Framework')
parser.add_argument('--eval', action='store_true',
help='Flag to set script to evaluate probe (rather than train)')
parser.add_argument('--split', type=str, default='semeval2007',
choices=['semeval2007', 'senseval2', 'senseval3', 'semeval2013', 'semeval2015', 'ALL', 'all-test'],
help='Which evaluation split on which to evaluate probe')
def wn_keys(data):
    '''collect a lemma+pos lookup key for every sense-annotated token

    args:
        data: list of sentences, each a list of
            (form, lemma, pos, instance_id, label) tuples; instance_id
            is -1 for unannotated context words

    returns:
        list of keys, one per annotated token, in corpus order
    '''
    return [generate_key(lemma, pos)
            for sent in data
            for _, lemma, pos, inst, _ in sent
            if inst != -1]
def batchify(data, bsz=1):
    '''group preprocessed examples into batches of size bsz

    args:
        data: list of (input_ids, output_mask, instance_id, label) tuples,
            where input_ids and label are tensors with a leading batch dim
            of 1 and output_mask is a 1-D tensor
    returns:
        list of (ids, masks, instances, labels) batch tuples; the last
        batch may be smaller than bsz
    '''
    print('Batching data with bsz={}...'.format(bsz))
    batched_data = []
    for start in range(0, len(data), bsz):
        chunk = data[start:start + bsz]  # slicing past the end is safe
        ids = torch.cat([example[0] for example in chunk], dim=0)
        masks = torch.stack([example[1] for example in chunk], dim=0)
        insts = [example[2] for example in chunk]
        labels = torch.cat([example[3] for example in chunk], dim=0)
        batched_data.append((ids, masks, insts, labels))
    return batched_data
#takes in text data, tensorizes it for BERT, runs though BERT,
#filters out the context words (not labeled), and averages
#the representation(s) for words/phrases to be disambiguated
#output is tuples of (input tensor prepared for linear probing model,
#instance numbers (for dataset), tensor of label indexes)
def preprocess(tokenizer, context_model, text_data, label_space, label_map):
    '''encode sentences with the frozen encoder and extract probe examples

    args:
        tokenizer: subword tokenizer matching context_model
        context_model: frozen pretrained encoder (run under no_grad, on gpu)
        text_data: list of sentences, each a list of
            (word, lemma, pos, instance_id, label) tuples; instance_id is -1
            for unannotated context words
        label_space: list of all sense labels (includes an 'n/a' entry)
        label_map: dict mapping lemma+pos keys to candidate label indexes

    returns:
        list of (feature_vector, output_mask, instance_id, label_index)
        tuples, one per sense-annotated word
    '''
    processed_examples = []
    output_masks = []
    instances = []
    label_indexes = []
    #tensorize data
    for sent in tqdm(text_data):
        sent_ids = [torch.tensor([tokenizer.encode(tokenizer.cls_token)])] #aka sos token, returns a list with single index
        bert_mask = [-1]
        for idx, (word, lemma, pos, inst, label) in enumerate(sent):
            word_ids = torch.tensor([tokenizer.encode(word.lower())])
            sent_ids.append(word_ids)
            if inst != -1:
                #masking for averaging of bert outputs
                bert_mask.extend([idx]*word_ids.size(-1))
                #tracking instance for sense-labeled word
                instances.append(inst)
                #adding label tensor for sense-labeled word; labels outside
                #the training label space fall back to the 'n/a' index
                if label in label_space:
                    label_indexes.append(torch.tensor([label_space.index(label)]))
                else:
                    label_indexes.append(torch.tensor([label_space.index('n/a')]))
                #adding appropriate label space for sense-labeled word (we only use this for wsd task)
                key = generate_key(lemma, pos)
                if key in label_map:
                    l_space = label_map[key]
                    o_mask = torch.zeros(len(label_space))
                    for l in l_space: o_mask[l] = 1
                    output_masks.append(o_mask)
                else:
                    output_masks.append(torch.ones(len(label_space))) #let this predict whatever -- should not use this (default to backoff for unseen forms)
            else:
                bert_mask.extend([-1]*word_ids.size(-1))
        #add eos token
        sent_ids.append(torch.tensor([tokenizer.encode(tokenizer.sep_token)])) #aka eos token
        bert_mask.append(-1)
        sent_ids = torch.cat(sent_ids, dim=-1)
        #run inputs through frozen bert
        sent_ids = sent_ids.cuda()
        with torch.no_grad():
            output = context_model(sent_ids)[0].squeeze().cpu()
        #average outputs for subword units in same word/phrase, drop unlabeled words
        combined_outputs = process_encoder_outputs(output, bert_mask)
        processed_examples.extend(combined_outputs)
    #package preprocessed data together + return
    data = list(zip(processed_examples, output_masks, instances, label_indexes))
    return data
def _train(train_data, probe, optim, criterion, bsz=1, silent=False):
    '''run one epoch of probe training over the batched training data

    args:
        train_data: list of (input_ids, output_mask, instances, labels) batches
        probe: the linear probing model (updated in place)
        optim: optimizer over the probe parameters (updated in place)
        criterion: per-example loss (CrossEntropyLoss with reduction='none')
        bsz: nominal batch size; batches are pre-built, so only informational
        silent: if True, suppress the tqdm progress bar

    returns:
        the (mutated) probe and optimizer
    '''
    if not silent: train_data = tqdm(train_data)
    for input_ids, output_mask, _, label in train_data:
        input_ids = input_ids.cuda()
        output_mask = output_mask.cuda()
        label = label.cuda()
        optim.zero_grad()
        output = probe(input_ids)
        #mask to candidate senses for target word
        output = torch.mul(output, output_mask)
        #set masked out items to -inf to get proper probabilities over the candidate senses
        output[output == 0] = float('-inf')
        output = F.softmax(output, dim=-1)
        # NOTE(review): CrossEntropyLoss expects unnormalized logits but is fed
        # softmax probabilities here (i.e. the loss is CE over a double softmax).
        # This matches the original implementation -- confirm before changing.
        loss = criterion(output, label)
        #average the per-example losses over the batch
        batch_sz = loss.size(0)
        loss = loss.sum()/batch_sz
        loss.backward()
        optim.step()
    return probe, optim
def _eval(eval_data, probe, label_space):
    '''predict a sense label for every example in eval_data

    args:
        eval_data: list of single-example (input_ids, output_mask,
            instances, labels) batches
        probe: trained linear probing model
        label_space: list mapping output indexes to sense labels

    returns:
        list of (instance_id, predicted_label) tuples
    '''
    predictions = []
    for input_ids, output_mask, inst, _ in eval_data:
        input_ids = input_ids.cuda()
        output_mask = output_mask.cuda()
        #score the example with the probe (no gradients needed at eval time)
        with torch.no_grad():
            scores = probe(input_ids)
        #zero out senses that are not candidates for this lemma+pos, then
        #renormalize over the remaining candidates
        scores = torch.mul(scores, output_mask)
        scores[scores == 0] = float('-inf')
        scores = F.softmax(scores, dim=-1)
        #take the highest-probability candidate sense
        best = scores.topk(1, dim=-1)[1].squeeze().item()
        predictions.append((inst[0], label_space[best]))
    return predictions
def _eval_with_backoff(eval_data, probe, label_space, wn_senses, coverage, keys):
    '''predict sense labels, backing off to the WordNet first sense for
    lemma+pos combinations never observed during training

    args:
        eval_data: list of single-example batches
        probe: trained linear probing model
        label_space: list mapping output indexes to sense labels
        wn_senses: dict mapping lemma+pos keys to candidate senses (WN order)
        coverage: set of lemma+pos keys seen in the training data
        keys: lemma+pos key per evaluation example (aligned with eval_data)

    returns:
        list of (instance_id, predicted_label) tuples
    '''
    predictions = []
    for key, (input_ids, output_mask, inst, _) in zip(keys, eval_data):
        input_ids = input_ids.cuda()
        output_mask = output_mask.cuda()
        if key not in coverage:
            #unseen lemma+pos: back off to the WordNet first sense (ws1)
            predictions.append((inst[0], wn_senses[key][0]))
            continue
        #score the example with the probe, restricted to candidate senses
        with torch.no_grad():
            scores = probe(input_ids)
        scores = torch.mul(scores, output_mask)
        #set masked out items to -inf to get proper probabilities over candidates
        scores[scores == 0] = float('-inf')
        scores = F.softmax(scores, dim=-1)
        best = scores.topk(1, dim=-1)[1].squeeze().item()
        predictions.append((inst[0], label_space[best]))
    return predictions
def train_probe(args):
    '''train a linear WSD probe over frozen BERT features on SemCor

    The probe is trained for args.epochs epochs (recomputed in the k-shot
    setting so the total number of gradient steps matches the full-data
    run) and the checkpoint with the best dev (semeval2007) f1 is kept.

    args:
        args: parsed command-line arguments (see module-level parser)

    side effects:
        writes args.ckpt/best_model.ckpt and temporary prediction files
        under args.ckpt
    '''
    lr = args.lr
    bsz = args.bsz
    #create passed in ckpt dir if doesn't exist
    if not os.path.exists(args.ckpt): os.mkdir(args.ckpt)
    # --- load frozen pretrained encoder ---
    #model loading code based on pytorch_transformers README example
    tokenizer = load_tokenizer(args.encoder_name)
    pretrained_model, output_dim = load_pretrained_model(args.encoder_name)
    pretrained_model = pretrained_model.cuda()
    # --- load + preprocess training and dev data ---
    print('Loading data + preprocessing...')
    sys.stdout.flush()
    #loading WSD (semcor) data + convert to supersenses
    train_path = os.path.join(args.data_path, 'Training_Corpora/SemCor/')
    train_data = load_data(train_path, 'semcor')
    #filter train data for k-shot learning
    if args.kshot > 0:
        train_data = filter_k_examples(train_data, args.kshot)
    task_labels, label_map = get_label_space(train_data)
    print('num labels = {} + 1 unknown label'.format(len(task_labels)-1))
    train_data = preprocess(tokenizer, pretrained_model, train_data, task_labels, label_map)
    train_data = batchify(train_data, bsz=args.bsz)
    num_epochs = args.epochs
    if args.kshot > 0:
        NUM_STEPS = 176600 #hard coded for fair comparision with full model on default num. of epochs
        num_batches = len(train_data)
        num_epochs = NUM_STEPS//num_batches #recalculate number of epochs
        overflow_steps = NUM_STEPS%num_batches #num steps in last overflow epoch (if there is one, otherwise 0)
        t_total = NUM_STEPS #manually set number of steps for lr schedule
        if overflow_steps > 0: num_epochs+=1 #add extra epoch for overflow steps
        #BUGFIX: this print previously referenced the undefined name `epochs`,
        #raising a NameError in every k-shot run; use num_epochs instead
        print('Overriding args.epochs and training for {} epochs...'.format(num_epochs))
    #loading eval data & convert to supersense tags
    #dev set = semeval2007
    semeval2007_path = os.path.join(args.data_path, 'Evaluation_Datasets/semeval2007/')
    semeval2007_data = load_data(semeval2007_path, 'semeval2007')
    semeval2007_data = preprocess(tokenizer, pretrained_model, semeval2007_data, task_labels, label_map)
    semeval2007_data = batchify(semeval2007_data, bsz=1)
    # --- probe, loss and optimizer ---
    #probing model = projection layer to label space, loss function, and optimizer
    probe = torch.nn.Linear(output_dim, len(task_labels))
    probe = probe.cuda()
    criterion = torch.nn.CrossEntropyLoss(reduction='none')
    optim = torch.optim.Adam(probe.parameters(), lr=lr)
    # --- training loop ---
    best_dev_f1 = 0.
    print('Training probe...')
    sys.stdout.flush()
    for epoch in range(1, num_epochs+1):
        #train on full dataset (probe and optim are also updated in place)
        probe, optim = _train(train_data, probe, optim, criterion, bsz=bsz, silent=args.silent)
        #eval probe on dev set (semeval2007)
        eval_preds = _eval(semeval2007_data, probe, task_labels)
        #generate predictions file
        pred_filepath = os.path.join(args.ckpt, 'tmp_predictions.txt')
        with open(pred_filepath, 'w') as f:
            for inst, prediction in eval_preds:
                f.write('{} {}\n'.format(inst, prediction))
        #run predictions through scorer
        gold_filepath = os.path.join(args.data_path, 'Evaluation_Datasets/semeval2007/semeval2007.gold.key.txt')
        scorer_path = os.path.join(args.data_path, 'Evaluation_Datasets')
        _, _, dev_f1 = evaluate_output(scorer_path, gold_filepath, pred_filepath)
        print('Dev f1 after {} epochs = {}'.format(epoch, dev_f1))
        sys.stdout.flush()
        if dev_f1 >= best_dev_f1:
            print('updating best model at epoch {}...'.format(epoch))
            sys.stdout.flush()
            best_dev_f1 = dev_f1
            #save to file if best probe so far on dev set
            probe_fname = os.path.join(args.ckpt, 'best_model.ckpt')
            with open(probe_fname, 'wb') as f:
                torch.save(probe.state_dict(), f)
            sys.stdout.flush()
        #shuffle train data after every epoch
        random.shuffle(train_data)
    return
def evaluate_probe(args):
    '''evaluate a trained WSD probe on one evaluation split

    Loads the frozen encoder, rebuilds the training label space (needed to
    size the probe and to compute backoff coverage), restores the best
    checkpoint from args.ckpt and reports f1 both with and without
    WordNet first-sense backoff for lemma+pos pairs unseen in training.

    args:
        args: parsed command-line arguments (see module-level parser)
    '''
    print('Evaluating WSD probe on {}...'.format(args.split))
    '''
    LOAD TOKENIZER + BERT MODEL
    '''
    tokenizer = load_tokenizer(args.encoder_name)
    pretrained_model, output_dim = load_pretrained_model(args.encoder_name)
    pretrained_model = pretrained_model.cuda()
    '''
    GET LABEL SPACE
    '''
    train_path = os.path.join(args.data_path, 'Training_Corpora/SemCor/')
    train_data = load_data(train_path, 'semcor')
    task_labels, label_map = get_label_space(train_data)
    #for backoff eval
    train_keys = wn_keys(train_data)
    coverage = set(train_keys)
    '''
    LOAD TRAINED PROBE
    '''
    probe = torch.nn.Linear(output_dim, len(task_labels))
    probe_path = os.path.join(args.ckpt, 'best_model.ckpt')
    probe.load_state_dict(torch.load(probe_path))
    probe = probe.cuda()
    '''
    LOAD EVAL SET
    '''
    eval_path = os.path.join(args.data_path, 'Evaluation_Datasets/{}/'.format(args.split))
    eval_data = load_data(eval_path, args.split)
    #for backoff
    eval_keys = wn_keys(eval_data)
    eval_data = preprocess(tokenizer, pretrained_model, eval_data, task_labels, label_map)
    eval_data = batchify(eval_data, bsz=1)
    '''
    EVALUATE PROBE w/o backoff
    '''
    eval_preds = _eval(eval_data, probe, task_labels)
    #generate predictions file
    pred_filepath = os.path.join(args.ckpt, './{}_predictions.txt'.format(args.split))
    with open(pred_filepath, 'w') as f:
        for inst, prediction in eval_preds:
            f.write('{} {}\n'.format(inst, prediction))
    #run predictions through scorer
    gold_filepath = os.path.join(eval_path, '{}.gold.key.txt'.format(args.split))
    scorer_path = os.path.join(args.data_path, 'Evaluation_Datasets')
    p, r, f1 = evaluate_output(scorer_path, gold_filepath, pred_filepath)
    print('f1 of WSD probe on {} test set = {}'.format(args.split, f1))
    '''
    EVALUATE PROBE with backoff
    '''
    wn_path = os.path.join(args.data_path, 'Data_Validation/candidatesWN30.txt')
    wn_senses = load_wn_senses(wn_path)
    eval_preds = _eval_with_backoff(eval_data, probe, task_labels, wn_senses, coverage, eval_keys)
    #generate predictions file
    pred_filepath = os.path.join(args.ckpt, './{}_backoff_predictions.txt'.format(args.split))
    with open(pred_filepath, 'w') as f:
        for inst, prediction in eval_preds:
            f.write('{} {}\n'.format(inst, prediction))
    #run predictions through scorer
    gold_filepath = os.path.join(eval_path, '{}.gold.key.txt'.format(args.split))
    scorer_path = os.path.join(args.data_path, 'Evaluation_Datasets')
    p, r, f1 = evaluate_output(scorer_path, gold_filepath, pred_filepath)
    print('f1 of BERT probe (with backoff) = {}'.format(f1))
    return
if __name__ == "__main__":
    #probe training/eval moves models to cuda, so a GPU is mandatory
    if not torch.cuda.is_available():
        print("Need available GPU(s) to run this model...")
        quit()
    args = parser.parse_args()
    print(args)
    #set random seeds
    #seed every source of randomness (python hash, python random, numpy,
    #torch cpu + all cuda devices) and force deterministic cudnn kernels
    #for reproducible runs
    torch.manual_seed(args.rand_seed)
    os.environ['PYTHONHASHSEED'] = str(args.rand_seed)
    torch.cuda.manual_seed(args.rand_seed)
    torch.cuda.manual_seed_all(args.rand_seed)
    np.random.seed(args.rand_seed)
    random.seed(args.rand_seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic=True
    #--eval evaluates an existing checkpoint; default is training
    if args.eval:
        evaluate_probe(args)
    else:
        train_probe(args)
| [
"torch.mul",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.random.seed",
"os.mkdir",
"sys.stdout.flush",
"random.shuffle",
"torch.cat",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.loa... | [((441, 513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""BERT Frozen Probing Model for WSD"""'}), "(description='BERT Frozen Probing Model for WSD')\n", (464, 513), False, 'import argparse\n'), ((3006, 3021), 'tqdm.tqdm', 'tqdm', (['text_data'], {}), '(text_data)\n', (3010, 3021), False, 'from tqdm import tqdm\n'), ((7666, 7684), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7682, 7684), False, 'import sys\n'), ((7752, 7808), 'os.path.join', 'os.path.join', (['args.data_path', '"""Training_Corpora/SemCor/"""'], {}), "(args.data_path, 'Training_Corpora/SemCor/')\n", (7764, 7808), False, 'import os\n'), ((8906, 8970), 'os.path.join', 'os.path.join', (['args.data_path', '"""Evaluation_Datasets/semeval2007/"""'], {}), "(args.data_path, 'Evaluation_Datasets/semeval2007/')\n", (8918, 8970), False, 'import os\n'), ((9404, 9447), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (9429, 9447), False, 'import torch\n'), ((9596, 9614), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9612, 9614), False, 'import sys\n'), ((11209, 11265), 'os.path.join', 'os.path.join', (['args.data_path', '"""Training_Corpora/SemCor/"""'], {}), "(args.data_path, 'Training_Corpora/SemCor/')\n", (11221, 11265), False, 'import os\n'), ((11547, 11589), 'os.path.join', 'os.path.join', (['args.ckpt', '"""best_model.ckpt"""'], {}), "(args.ckpt, 'best_model.ckpt')\n", (11559, 11589), False, 'import os\n'), ((12446, 12497), 'os.path.join', 'os.path.join', (['args.data_path', '"""Evaluation_Datasets"""'], {}), "(args.data_path, 'Evaluation_Datasets')\n", (12458, 12497), False, 'import os\n'), ((12689, 12755), 'os.path.join', 'os.path.join', (['args.data_path', '"""Data_Validation/candidatesWN30.txt"""'], {}), "(args.data_path, 'Data_Validation/candidatesWN30.txt')\n", (12701, 12755), False, 'import os\n'), ((13260, 13311), 'os.path.join', 'os.path.join', 
(['args.data_path', '"""Evaluation_Datasets"""'], {}), "(args.data_path, 'Evaluation_Datasets')\n", (13272, 13311), False, 'import os\n'), ((13640, 13673), 'torch.manual_seed', 'torch.manual_seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (13657, 13673), False, 'import torch\n'), ((13729, 13767), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (13751, 13767), False, 'import torch\n'), ((13769, 13811), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.rand_seed'], {}), '(args.rand_seed)\n', (13795, 13811), False, 'import torch\n'), ((13818, 13848), 'numpy.random.seed', 'np.random.seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (13832, 13848), True, 'import numpy as np\n'), ((13850, 13877), 'random.seed', 'random.seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (13861, 13877), False, 'import random\n'), ((2154, 2203), 'torch.cat', 'torch.cat', (['[ids for ids, _, _, _ in d_arr]'], {'dim': '(0)'}), '([ids for ids, _, _, _ in d_arr], dim=0)\n', (2163, 2203), False, 'import torch\n'), ((2222, 2275), 'torch.stack', 'torch.stack', (['[mask for _, mask, _, _ in d_arr]'], {'dim': '(0)'}), '([mask for _, mask, _, _ in d_arr], dim=0)\n', (2233, 2275), False, 'import torch\n'), ((2347, 2400), 'torch.cat', 'torch.cat', (['[label for _, _, _, label in d_arr]'], {'dim': '(0)'}), '([label for _, _, _, label in d_arr], dim=0)\n', (2356, 2400), False, 'import torch\n'), ((4369, 4396), 'torch.cat', 'torch.cat', (['sent_ids'], {'dim': '(-1)'}), '(sent_ids, dim=-1)\n', (4378, 4396), False, 'import torch\n'), ((4971, 4987), 'tqdm.tqdm', 'tqdm', (['train_data'], {}), '(train_data)\n', (4975, 4987), False, 'from tqdm import tqdm\n'), ((5234, 5264), 'torch.mul', 'torch.mul', (['output', 'output_mask'], {}), '(output, output_mask)\n', (5243, 5264), False, 'import torch\n'), ((5401, 5426), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (5410, 5426), 
True, 'from torch.nn import functional as F\n'), ((7259, 7284), 'os.path.exists', 'os.path.exists', (['args.ckpt'], {}), '(args.ckpt)\n', (7273, 7284), False, 'import os\n'), ((7286, 7305), 'os.mkdir', 'os.mkdir', (['args.ckpt'], {}), '(args.ckpt)\n', (7294, 7305), False, 'import os\n'), ((9914, 9960), 'os.path.join', 'os.path.join', (['args.ckpt', '"""tmp_predictions.txt"""'], {}), "(args.ckpt, 'tmp_predictions.txt')\n", (9926, 9960), False, 'import os\n'), ((10139, 10231), 'os.path.join', 'os.path.join', (['args.data_path', '"""Evaluation_Datasets/semeval2007/semeval2007.gold.key.txt"""'], {}), "(args.data_path,\n 'Evaluation_Datasets/semeval2007/semeval2007.gold.key.txt')\n", (10151, 10231), False, 'import os\n'), ((10244, 10295), 'os.path.join', 'os.path.join', (['args.data_path', '"""Evaluation_Datasets"""'], {}), "(args.data_path, 'Evaluation_Datasets')\n", (10256, 10295), False, 'import os\n'), ((10435, 10453), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10451, 10453), False, 'import sys\n'), ((10779, 10797), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10795, 10797), False, 'import sys\n'), ((10841, 10867), 'random.shuffle', 'random.shuffle', (['train_data'], {}), '(train_data)\n', (10855, 10867), False, 'import random\n'), ((11613, 11635), 'torch.load', 'torch.load', (['probe_path'], {}), '(probe_path)\n', (11623, 11635), False, 'import torch\n'), ((13486, 13511), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13509, 13511), False, 'import torch\n'), ((4469, 4484), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4482, 4484), False, 'import torch\n'), ((5786, 5801), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5799, 5801), False, 'import torch\n'), ((5890, 5920), 'torch.mul', 'torch.mul', (['output', 'output_mask'], {}), '(output, output_mask)\n', (5899, 5920), False, 'import torch\n'), ((6059, 6084), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(-1)'}), '(output, 
dim=-1)\n', (6068, 6084), True, 'from torch.nn import functional as F\n'), ((10548, 10566), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10564, 10566), False, 'import sys\n'), ((10658, 10700), 'os.path.join', 'os.path.join', (['args.ckpt', '"""best_model.ckpt"""'], {}), "(args.ckpt, 'best_model.ckpt')\n", (10670, 10700), False, 'import os\n'), ((6558, 6573), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6571, 6573), False, 'import torch\n'), ((6619, 6649), 'torch.mul', 'torch.mul', (['output', 'output_mask'], {}), '(output, output_mask)\n', (6628, 6649), False, 'import torch\n'), ((6796, 6821), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(-1)'}), '(output, dim=-1)\n', (6805, 6821), True, 'from torch.nn import functional as F\n')] |
'''@file alignment_decoder.py
contains the AlignmentDecoder'''
import os
import struct
import numpy as np
import tensorflow as tf
import decoder
class AlignmentDecoder(decoder.Decoder):
    '''feature Decoder that converts posteriors into pseudo log-likelihoods'''

    def __call__(self, inputs, input_seq_length):
        '''decode a batch of data

        Args:
            inputs: the inputs as a dictionary of [batch_size x time x ...]
                tensors
            input_seq_length: the input sequence lengths as a dictionary of
                [batch_size] vectors

        Returns:
            - the decoded sequences as a dictionary of outputs
        '''

        with tf.name_scope('alignment_decoder'):

            #create the decoding graph
            logits, logits_seq_length = self.model(
                inputs, input_seq_length, targets=[],
                target_seq_length=[], is_training=False)

            #compute the log probabilities of the (single) model output
            #BUGFIX: dict views are not subscriptable in python 3, so wrap
            #the values in list() before indexing (also valid in python 2)
            logprobs = tf.log(tf.nn.softmax(list(logits.values())[0]))

            #read the pd prior
            prior = np.load(self.conf['prior'])

            #compute posterior to pseudo likelihood:
            #log p(x|s) is proportional to log p(s|x) - log p(s)
            loglikes = logprobs - np.log(prior)

            outputs = {o:(loglikes, logits_seq_length[o]) for o in logits}

        return outputs

    def write(self, outputs, directory, names):
        '''write the output of the decoder to disk

        args:
            outputs: the outputs of the decoder
            directory: the directory where the results should be written
            names: the names of the utterances in outputs
        '''
        for o in outputs:
            if not os.path.isdir(os.path.join(directory, o)):
                os.makedirs(os.path.join(directory, o))
            batch_size = outputs[o][0].shape[0]
            scp_file = os.path.join(directory, o, 'feats.scp')
            ark_file = os.path.join(directory, o, 'loglikes.ark')
            for i in range(batch_size):
                #truncate each utterance to its true sequence length
                output = outputs[o][0][i, :outputs[o][1][i]]
                arkwrite(scp_file, ark_file, names[i], output)

    def update_evaluation_loss(self, loss, outputs, references,
                               reference_seq_length):
        '''update the evaluation loss

        args:
            loss: the current evaluation loss
            outputs: the outputs of the decoder as a dictionary
            references: the references as a dictionary
            reference_seq_length: the sequence lengths of the references

        Returns:
            an op to update the evalution loss
        '''
        raise Exception('AlignmentDecoder can not be used to validate')
def arkwrite(scp_file, ark_file, name, array):
    '''append a float matrix to a Kaldi-style binary ark file and index it
    in the companion scp file

    args:
        scp_file: path to the scp index file (opened in append mode)
        ark_file: path to the binary ark file (opened in append mode)
        name: utterance key (str or bytes)
        array: 2D numpy float array to write

    The byte layout written is: name, then a pad byte plus the 'BFM '
    float-matrix marker, then the row/column counts, then the raw data.
    '''
    rows, cols = array.shape
    #BUGFIX: struct '%ds' and 'c' formats require bytes in python 3; the
    #original passed str objects, which only worked under python 2
    token = name.encode('utf-8') if isinstance(name, str) else name
    #use context managers so the files are closed even if a write fails
    with open(ark_file, 'ab') as ark_fid:
        ark_fid.write(struct.pack('<%ds' % (len(token)), token))
        #the scp entry points at the byte position right after the key
        pos = ark_fid.tell()
        ark_fid.write(struct.pack('<xcccc', b'B', b'F', b'M', b' '))
        ark_fid.write(struct.pack('<bi', 4, rows))
        ark_fid.write(struct.pack('<bi', 4, cols))
        ark_fid.write(array)
    with open(scp_file, 'a') as scp_fid:
        scp_fid.write('%s %s:%s\n' % (name, ark_file, pos))
| [
"numpy.log",
"os.path.join",
"struct.pack",
"tensorflow.name_scope",
"numpy.load"
] | [((2903, 2944), 'struct.pack', 'struct.pack', (['"""<xcccc"""', '"""B"""', '"""F"""', '"""M"""', '""" """'], {}), "('<xcccc', 'B', 'F', 'M', ' ')\n", (2914, 2944), False, 'import struct\n'), ((2964, 2991), 'struct.pack', 'struct.pack', (['"""<bi"""', '(4)', 'rows'], {}), "('<bi', 4, rows)\n", (2975, 2991), False, 'import struct\n'), ((3011, 3038), 'struct.pack', 'struct.pack', (['"""<bi"""', '(4)', 'cols'], {}), "('<bi', 4, cols)\n", (3022, 3038), False, 'import struct\n'), ((634, 668), 'tensorflow.name_scope', 'tf.name_scope', (['"""alignment_decoder"""'], {}), "('alignment_decoder')\n", (647, 668), True, 'import tensorflow as tf\n'), ((1034, 1061), 'numpy.load', 'np.load', (["self.conf['prior']"], {}), "(self.conf['prior'])\n", (1041, 1061), True, 'import numpy as np\n'), ((1785, 1824), 'os.path.join', 'os.path.join', (['directory', 'o', '"""feats.scp"""'], {}), "(directory, o, 'feats.scp')\n", (1797, 1824), False, 'import os\n'), ((1848, 1890), 'os.path.join', 'os.path.join', (['directory', 'o', '"""loglikes.ark"""'], {}), "(directory, o, 'loglikes.ark')\n", (1860, 1890), False, 'import os\n'), ((1149, 1162), 'numpy.log', 'np.log', (['prior'], {}), '(prior)\n', (1155, 1162), True, 'import numpy as np\n'), ((1629, 1655), 'os.path.join', 'os.path.join', (['directory', 'o'], {}), '(directory, o)\n', (1641, 1655), False, 'import os\n'), ((1686, 1712), 'os.path.join', 'os.path.join', (['directory', 'o'], {}), '(directory, o)\n', (1698, 1712), False, 'import os\n')] |
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder
def process_data_orig(test_size, n_splits, label, save_label):
	'''
	process_data_orig will create train, test, validation folds for all classes and save them to the folder the script is run in
	requires path to final, feature table outputted from GDD Classifier R Script
	input:
		test_size = integer representing proportion out of 100 which should be used to make the testing set (default = 20, meaning test size is .2 of overall dataset)
		n_splits = integer, number of ensembles to create
		label = string input from .sh file representing specific label provided for each run ('' = normal, 'bal' = balanced, can define more)
		save_label = Boolean for if you want to save the labelled versions of feature tables (X) and labels (y)
	output:
		x_train_folds, y_train_folds = list of np.arrays representing each ensemble training set for feature tables (x_train_folds) and labels (y_train_folds)
		x_test_folds, y_test_folds = list of np.arrays representing each ensemble validation set for feature tables (x_test_folds) and labels (y_test_folds)
	'''
	data = pd.read_csv('/home/darmofam/morris/classifier/feature_table_all_cases_sigs.tsv', sep='\t') #may need to change path to feature table
	#Removing those not annotated as a training sample (i.e. other, repeat patient samples, low purity), sarcoma
	data = data[data.Classification_Category == 'train']
	data = data[data.Cancer_Type != 'Sarcoma.NOS']
	labels = data.Cancer_Type
	#splitting data to appropriate test size and saving value counts of each
	data_train_labelled, data_test_labelled, labels_train_labelled, labels_test_labelled = train_test_split(data, labels, test_size=test_size/100, random_state = 0)
	data_train_labelled.Cancer_Type.value_counts().to_csv('train_N' + label + '.csv')
	if save_label: #only if you are saving the datasets with the cancer type labels
		# fix: the original re-ran the identical train_test_split here; with the
		# same arguments and random_state it yields the same split, so the
		# redundant call was removed and the split above is reused directly.
		data_train_labelled.to_csv('ft_train_labelled' + label + '.csv', header = True, index = True)
		labels_train_labelled.to_csv('labels_train_labelled' + label + '.csv', header = True, index = True)
		data_test_labelled.to_csv('ft_test_labelled' + label + '.csv', header = True, index = True)
		labels_test_labelled.to_csv('labels_test_labelled' + label + '.csv', header = True, index = True)
		print('done')
	#data drops the following labels
	data = data.drop(['SAMPLE_ID', 'CANCER_TYPE', 'CANCER_TYPE_DETAILED', 'SAMPLE_TYPE', 'PRIMARY_SITE', 'METASTATIC_SITE', 'Cancer_Type', 'Classification_Category'], axis=1)
	data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size=test_size/100, random_state = 0)
	#saving data tables ->
	data_train.to_csv('ft_train' + label + '.csv', header = True, index = True)
	labels_train.to_csv('labels_train' + label + '.csv', header = True, index = True)
	data_test.to_csv('ft_test' + label + '.csv', header = True, index = True)
	labels_test.to_csv('labels_test' + label + '.csv', header = True, index = True)
	#now split into ensemble folds and encode tumor type labels
	sss = StratifiedShuffleSplit(n_splits=n_splits, random_state=0)
	sss.get_n_splits(data_train, labels_train)
	encoder = LabelEncoder()
	x_train_folds, x_test_folds = [], []
	y_train_folds, y_test_folds = [], []
	for train_index, test_index in sss.split(data_train, labels_train):
		#for each split, append np.arrays for training and testing feature tables, and encoded labels
		x_train, x_test = np.array(data_train.iloc[train_index]), np.array(data_train.iloc[test_index])
		y_train, y_test = np.array(labels_train.iloc[train_index]), np.array(labels_train.iloc[test_index])
		y_train = encoder.fit_transform(y_train)
		y_test = encoder.fit_transform(y_test)
		x_train_folds.append(x_train)
		x_test_folds.append(x_test)
		y_train_folds.append(y_train)
		y_test_folds.append(y_test)
	return x_train_folds, x_test_folds, y_train_folds, y_test_folds
test_size = 20 # percent (out of 100) of samples held out as the test set
n_splits = 10 # number of ensemble folds produced by StratifiedShuffleSplit
# an optional command line argument becomes a suffix for all output file names
if len(sys.argv) > 1:
	label = '_' + sys.argv[1]
	print(label)
else:
	label = ''
# run the full pipeline immediately (note: no __main__ guard, so this also
# executes on import)
x_train_folds, x_test_folds, y_train_folds, y_test_folds = process_data_orig(test_size, n_splits, label, save_label=False)
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.array"
] | [((1276, 1371), 'pandas.read_csv', 'pd.read_csv', (['"""/home/darmofam/morris/classifier/feature_table_all_cases_sigs.tsv"""'], {'sep': '"""\t"""'}), "('/home/darmofam/morris/classifier/feature_table_all_cases_sigs.tsv'\n , sep='\\t')\n", (1287, 1371), True, 'import pandas as pd\n'), ((1926, 1999), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(test_size / 100)', 'random_state': '(0)'}), '(data, labels, test_size=test_size / 100, random_state=0)\n', (1942, 1999), False, 'from sklearn.model_selection import train_test_split\n'), ((2993, 3066), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(test_size / 100)', 'random_state': '(0)'}), '(data, labels, test_size=test_size / 100, random_state=0)\n', (3009, 3066), False, 'from sklearn.model_selection import train_test_split\n'), ((3477, 3534), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'n_splits', 'random_state': '(0)'}), '(n_splits=n_splits, random_state=0)\n', (3499, 3534), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((3591, 3605), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3603, 3605), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2253, 2326), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(test_size / 100)', 'random_state': '(0)'}), '(data, labels, test_size=test_size / 100, random_state=0)\n', (2269, 2326), False, 'from sklearn.model_selection import train_test_split\n'), ((3865, 3903), 'numpy.array', 'np.array', (['data_train.iloc[train_index]'], {}), '(data_train.iloc[train_index])\n', (3873, 3903), True, 'import numpy as np\n'), ((3905, 3942), 'numpy.array', 'np.array', (['data_train.iloc[test_index]'], {}), '(data_train.iloc[test_index])\n', (3913, 3942), True, 'import numpy as np\n'), ((3963, 4003), 'numpy.array', 'np.array', 
(['labels_train.iloc[train_index]'], {}), '(labels_train.iloc[train_index])\n', (3971, 4003), True, 'import numpy as np\n'), ((4005, 4044), 'numpy.array', 'np.array', (['labels_train.iloc[test_index]'], {}), '(labels_train.iloc[test_index])\n', (4013, 4044), True, 'import numpy as np\n')] |
# Copyright (c) <NAME>.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.
# See https://github.com/dietmarwo/fast-cma-es/blob/master/tutorials/Scheduling.adoc for a detailed description.
import math
import pandas as pd
import numpy as np
import sys, math, time
from fcmaes import retry, advretry, mode, modecpp
from fcmaes.optimizer import logger, Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Dual_annealing, Differential_evolution, Minimize
from scipy.optimize import Bounds
import ctypes as ct
import multiprocessing as mp
from numba import njit, numba
STATION_NUM = 12 # number of dyson ring stations
TRAJECTORY_NUM = 50 # we select 10 mothership trajectories from these trajectories
ASTEROID_NUM = 83454 # number of asteroids
MAX_TIME = 20.0 # mission time in years
WAIT_TIME = 90/365.25 # years, after arrival wait until construction may start
ALPHA = 6.0e-9 # conversion factor time of flight -> arrival mass
#A_DYSON = 1.3197946923098154 # ESAs dyson sphere
A_DYSON = 1.1 # Tsinhuas dyson sphere
DAY = 24 * 3600
YEAR = DAY*365.25
@njit(fastmath=True)
def select(asteroid, station, trajectory, mass, transfer_start, transfer_time, x):
    """Greedily assign candidate asteroid transfers to station build slots.

    For the trajectories/stations/slot boundaries encoded in the argument
    vector x, accumulates the arrival mass per station time slot, replacing
    an already-assigned asteroid only when that improves the result.

    Returns a tuple (min_mass, slot_mass): a smoothed minimum-slot-mass value
    (weighted sum that guides the optimizer even when the true minimum is 0)
    and the sorted array of per-slot masses.
    """
    trajectories = trajectory_selection(x, TRAJECTORY_NUM)[1] # select 10 trajectories representing the 10 mothership trajectories
    stations = dyson_stations(x, STATION_NUM) # derive dyson_stations targeted at time slots from argument vector
    times = timings(x, STATION_NUM) # derive station build time slot boundaries from argument vector (in years)
    slot_mass = np.zeros(STATION_NUM) # mass sum per time slot
    ast_val = np.zeros(ASTEROID_NUM) # deployed mass for asteroid
    ast_slot = np.zeros(ASTEROID_NUM, dtype=numba.int32) # build time slot used for asteroid
    for i in range(asteroid.size):
        tr = int(trajectory[i]) # current trajectory
        if trajectories[tr] == 0: # trajectory not selected
            continue
        ast_id = int(asteroid[i]) # asteroid transferred
        stat = int(station[i]) # dyson sphere station targeted
        m = mass[i] # estimated asteroid mass at arrival time
        time_of_flight = transfer_time[i] # TOF of asteroid transfer
        arrival_time = transfer_start[i] + transfer_time[i] # arrival time of asteroid transfer
        # which station time slot ?
        for slot in range(STATION_NUM):
            max_time = times[slot+1] # time interval of time slot
            slot_time = times[slot]
            min_time = slot_time + WAIT_TIME # we have to wait 90 days
            if min_time >= MAX_TIME:
                continue
            if arrival_time >= slot_time and arrival_time <= max_time: # inside time slot
                if stat == stations[slot]: # does the station fit?
                    tof = time_of_flight
                    #if we have to fly a non optimal transfer, arrival mass is reduced
                    if arrival_time < min_time: # 90 DAYS are not yet over
                        to_add = min_time - arrival_time # add time difference
                        to_add *= math.sqrt(1 + to_add/WAIT_TIME) # add some more time to enable transfer
                        tof += to_add
                    mval = (1.0 - YEAR*tof*ALPHA) * m # estimated asteroid mass at arrival time
                    if ast_val[ast_id] > 0: # asteroid already transferred
                        old_slot = ast_slot[ast_id]
                        min_mass = np.amin(slot_mass) # greedily replace if current mass is higher
                        old_mass = slot_mass[old_slot] # but never replace at a nearly minimal slot
                        if (old_slot == slot or min_mass < 0.99*old_mass) and ast_val[ast_id] < mval:
                            # replace with actual transfer, remove old asteroid mass
                            slot_mass[old_slot] -= ast_val[ast_id]
                        else: # keep old transfer, don't use the new one
                            mval = 0
                    if mval > 0: # register actual transfer
                        slot_mass[slot] += mval
                        ast_val[ast_id] = mval
                        ast_slot[ast_id] = slot
    slot_mass.sort()
    min_mass = slot_mass[0]
    f = 1.0
    for m in slot_mass:
        # help the optimizer in case the minimum is 0
        min_mass += f*m
        f *= 0.5
    return min_mass, slot_mass
class fitness(object): # the objective function
    """Objective wrapper around select().

    Holds the transfer table columns as numpy arrays and tracks evaluation
    count / best value in shared memory (mp.RawValue) so parallel retry
    workers can log progress consistently.
    """
    def __init__(self, transfers):
        # transfers: pandas DataFrame, one row per candidate asteroid transfer
        self.evals = mp.RawValue(ct.c_long, 0) # writable across python processes
        self.best_y = mp.RawValue(ct.c_double, math.inf) # writable across python processes
        self.t0 = time.perf_counter()
        self.transfers = transfers
        self.asteroid = transfers["asteroid"].to_numpy()
        self.station = transfers["station"].to_numpy()
        self.trajectory = transfers["trajectory"].to_numpy()
        self.transfer_start = transfers["transfer_start"].to_numpy()
        self.transfer_time = transfers["transfer_time"].to_numpy()
        self.mass = transfers["mass"].to_numpy()
        self.dv = transfers["dv"].to_numpy()
        # dv sum per mothership trajectory, precomputed once
        self.trajectory_dv = trajectory_dv(self.asteroid, self.trajectory, self.dv)
        self.nobj = 2
        self.ncon = 0
    def __call__(self, x):
        """Single objective evaluation: returns the negated score (minimized)."""
        # determine the minimal station mass
        min_mass, slot_mass = select(self.asteroid, self.station, self.trajectory, self.mass,
                                     self.transfer_start, self.transfer_time, x)
        sdv = select_dvs(self.trajectory_dv, x)
        y = -score(min_mass, sdv)
        self.evals.value += 1
        if y < self.best_y.value:
            # new best solution found: remember it and log its details
            self.best_y.value = y
            trajectories = trajectory_selection(x, TRAJECTORY_NUM)[0]
            stations = dyson_stations(x, STATION_NUM)
            times = timings(x, STATION_NUM)
            sc = score(np.amin(slot_mass), sdv)
            logger().info("evals = {0}: time = {1:.1f} s = {2:.0f} a = {3:.0f} t = {4:s} s = {5:s} b = {6:s} m = {7:s} dv = {8:s}"
                .format(self.evals.value, dtime(self.t0), sc, ast_num(x, self.asteroid, self.trajectory),
                str([round(ti,2) for ti in times[1:-1]]),
                str([int(si) for si in stations]),
                str([int(ti) for ti in trajectories]),
                str([round(mi,2) for mi in slot_mass*1E-15]),
                str([round(di,2) for di in sdv])
                ))
        return y
    def fun(self, x):
        """Multi objective evaluation: returns [-min_mass*1E-10, dv penalty]."""
        min_mass, slot_mass = select(self.asteroid, self.station, self.trajectory, self.mass,
                                     self.transfer_start, self.transfer_time, x)
        sdv = select_dvs(self.trajectory_dv, x)
        scr, dv_val = score_vals(np.amin(slot_mass), sdv)
        y = -scr
        ys = [-min_mass*1E-10, dv_val]
        self.evals.value += 1
        if y < self.best_y.value:
            # new best solution found: remember it and log its details
            self.best_y.value = y
            trajectories = trajectory_selection(x, TRAJECTORY_NUM)[0]
            stations = dyson_stations(x, STATION_NUM)
            times = timings(x, STATION_NUM)
            sc = score(np.amin(slot_mass), sdv)
            logger().info("evals = {0}: time = {1:.1f} s = {2:.0f} a = {3:.0f} t = {4:s} s = {5:s} b = {6:s} m = {7:s} dv = {8:s}"
                .format(self.evals.value, dtime(self.t0), -self.best_y.value, ast_num(x, self.asteroid, self.trajectory),
                str([round(ti,2) for ti in times[1:-1]]),
                str([int(si) for si in stations]),
                str([int(ti) for ti in trajectories]),
                str([round(mi,2) for mi in slot_mass*1E-15]),
                str([round(di,2) for di in sdv])
                ))
        return ys
def optimize():
    """Load the candidate transfers and run the multi objective optimization.

    Reads the compressed transfer table, adjusts the global problem sizes,
    builds the box bounds for the argument vector and runs modecpp.retry
    (NSGA-II population update) on the two objective fitness.

    Returns:
        tuple (xs, front): argument vectors and objective values of the
        non-dominated solutions found.
    """
    name = 'tsin3000.60' # 60 trajectories to choose from
    # name = 'tsin3000.10' # 10 fixed trajectories
    transfers = pd.read_csv('data/' + name + '.xz', sep=' ', usecols=[1,2,3,4,5,6,7], compression='xz',
                    names=['asteroid', 'station', 'trajectory', 'mass', 'dv', 'transfer_start', 'transfer_time'])
    # uncomment to write a clear text csv
    # transfers.to_csv('data/' + name + '.txt', sep=' ', header=False)
    global TRAJECTORY_NUM, ASTEROID_NUM # adjust number of asteroids / trajectories
    TRAJECTORY_NUM = int(np.amax(transfers["trajectory"]) + 1)
    ASTEROID_NUM = int(np.amax(transfers["asteroid"]) + 1)
    # bounds for the objective function: x[0:10] trajectory picks,
    # x[10:10+STATION_NUM] station order keys, remainder = slot boundaries
    dim = 10+2*STATION_NUM-1
    lower_bound = np.zeros(dim)
    upper_bound = np.zeros(dim)
    lower_bound[:] = 0.0000001
    upper_bound[10:] = 0.9999999
    upper_bound[:10] = TRAJECTORY_NUM-0.00001 # trajectory indices
    bounds = Bounds(lower_bound, upper_bound)
    fit = fitness(transfers)
    fit.bounds = bounds
    # multi objective optimization 'modecpp' multi threaded, NSGA-II population
    # update; single objective alternatives (advretry + de_cma / Cma_cpp,
    # retry + Bite_cpp / De_cpp / scipy minimizers) can be substituted here.
    xs, front = modecpp.retry(fit.fun, fit.nobj, fit.ncon, fit.bounds, num_retries=640, popsize = 96,
                  max_evaluations = 3000000, nsga_update = True, logger = logger(), workers=16)
    # bug fix: the original returned store.get_xs()/store.get_ys(), but
    # 'store' was only defined in commented-out variants -> NameError.
    return xs, front
# utility functions
@njit(fastmath=True)
def next_free(used, p):
    """Return the first unused index at or after p (wrapping), marking it used."""
    n = used.size
    while used[p]:
        p = (p + 1) % n
    used[p] = True
    return p
@njit(fastmath=True)
def disjoined(s, n):
    """Map the entries of s onto pairwise distinct indices in [0, n).

    Returns (indices, mask): the disjoint index array and the boolean
    usage mask over the n available slots.
    """
    indices = np.zeros(s.size, dtype=numba.int32)
    taken = np.zeros(n, dtype=numba.boolean)
    for k in range(s.size):
        indices[k] = next_free(taken, s[k])
    return indices, taken
@njit(fastmath=True)
def timings(x, n):
    """Decode the n-1 slot boundaries (years) from x; returns them sorted,
    with 0 and MAX_TIME appended as the outer boundaries."""
    bounds = np.zeros(n + 1)
    base = 10 + STATION_NUM  # boundary genes start after picks + station keys
    for k in range(n - 1):
        bounds[k] = x[base + k] * MAX_TIME
    bounds[n - 1] = 0.0
    bounds[n] = MAX_TIME
    bounds.sort()
    return bounds
@njit(fastmath=True)
def dyson_stations(x, n):
    """Station ids (1-based) for the n time slots, ordered by the keys x[10:10+n]."""
    order = np.argsort(x[10:10 + n])
    return order + 1  # station numbers start with 1
@njit(fastmath=True)
def trajectory_selection(x, n):
    """Truncate the first 10 genes to trajectory ids and make them disjoint.

    Returns the pair produced by disjoined(): (distinct ids, selection mask).
    """
    picks = np.zeros(10, dtype=numba.int32)
    for k in range(10):
        picks[k] = int(x[k])
    return disjoined(picks, n)
@njit(fastmath=True)
def score(min_mass, trajectory_dv):
    """Competition score: scaled minimal slot mass divided by the dv penalty
    and the squared dyson sphere semi-major axis."""
    penalty = 0.0
    for dv in trajectory_dv:
        term = 1.0 + dv / 50.0
        penalty += term * term
    return (min_mass * 1E-10) / (A_DYSON * A_DYSON * penalty)
@njit(fastmath=True)
def score_vals(min_mass, trajectory_dv):
    """Like score(), but also return the raw dv penalty as a second value."""
    penalty = 0.0
    for dv in trajectory_dv:
        term = 1.0 + dv / 50.0
        penalty += term * term
    return (min_mass * 1E-10) / (A_DYSON * A_DYSON * penalty), penalty
@njit(fastmath=True)
def trajectory_dv(asteroid, trajectory, delta_v):
    """Sum, per mothership trajectory, the dv needed to reach its asteroids."""
    per_asteroid = np.zeros((TRAJECTORY_NUM, ASTEROID_NUM))
    for k in range(asteroid.size):
        # mothership delta velocity to reach this asteroid on this trajectory
        per_asteroid[trajectory[k], int(asteroid[k])] = delta_v[k]
    return np.sum(per_asteroid, axis=1)
@njit(fastmath=True)
def select_dvs(bdv, x):
    """dv totals of the 10 mothership trajectories selected by x."""
    chosen = trajectory_selection(x, TRAJECTORY_NUM)[0]
    out = np.zeros(10)
    for k in range(10):
        out[k] = bdv[int(chosen[k])]
    return out
@njit(fastmath=True)
def ast_num(x, asteroid, trajectory):
    """Count the distinct asteroids reachable via the selected trajectories."""
    seen = np.zeros(ASTEROID_NUM)
    mask = trajectory_selection(x, TRAJECTORY_NUM)[1]
    for k in range(asteroid.size):
        if mask[trajectory[k]]:  # only transfers on selected trajectories
            seen[int(asteroid[k])] = 1
    return np.sum(seen)
def main():
    """Script entry point: run the dyson ring optimization."""
    optimize()

if __name__ == '__main__':
    main()
| [
"numpy.amin",
"pandas.read_csv",
"numba.njit",
"time.perf_counter",
"multiprocessing.RawValue",
"fcmaes.optimizer.dtime",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"math.sqrt",
"scipy.optimize.Bounds",
"fcmaes.optimizer.logger",
"numpy.amax"
] | [((1098, 1117), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (1102, 1117), False, 'from numba import njit, numba\n'), ((11541, 11560), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (11545, 11560), False, 'from numba import njit, numba\n'), ((11671, 11690), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (11675, 11690), False, 'from numba import njit, numba\n'), ((11917, 11936), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (11921, 11936), False, 'from numba import njit, numba\n'), ((12137, 12156), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (12141, 12156), False, 'from numba import njit, numba\n'), ((12304, 12323), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (12308, 12323), False, 'from numba import njit, numba\n'), ((12512, 12531), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (12516, 12531), False, 'from numba import njit, numba\n'), ((12735, 12754), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (12739, 12754), False, 'from numba import njit, numba\n'), ((12971, 12990), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (12975, 12990), False, 'from numba import njit, numba\n'), ((13404, 13423), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (13408, 13423), False, 'from numba import njit, numba\n'), ((13625, 13644), 'numba.njit', 'njit', ([], {'fastmath': '(True)'}), '(fastmath=True)\n', (13629, 13644), False, 'from numba import njit, numba\n'), ((1582, 1603), 'numpy.zeros', 'np.zeros', (['STATION_NUM'], {}), '(STATION_NUM)\n', (1590, 1603), True, 'import numpy as np\n'), ((1643, 1665), 'numpy.zeros', 'np.zeros', (['ASTEROID_NUM'], {}), '(ASTEROID_NUM)\n', (1651, 1665), True, 'import numpy as np\n'), ((1710, 1751), 'numpy.zeros', 'np.zeros', (['ASTEROID_NUM'], {'dtype': 'numba.int32'}), 
'(ASTEROID_NUM, dtype=numba.int32)\n', (1718, 1751), True, 'import numpy as np\n'), ((8106, 8301), 'pandas.read_csv', 'pd.read_csv', (["('data/' + name + '.xz')"], {'sep': '""" """', 'usecols': '[1, 2, 3, 4, 5, 6, 7]', 'compression': '"""xz"""', 'names': "['asteroid', 'station', 'trajectory', 'mass', 'dv', 'transfer_start',\n 'transfer_time']"}), "('data/' + name + '.xz', sep=' ', usecols=[1, 2, 3, 4, 5, 6, 7],\n compression='xz', names=['asteroid', 'station', 'trajectory', 'mass',\n 'dv', 'transfer_start', 'transfer_time'])\n", (8117, 8301), True, 'import pandas as pd\n'), ((8722, 8735), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (8730, 8735), True, 'import numpy as np\n'), ((8803, 8816), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (8811, 8816), True, 'import numpy as np\n'), ((8962, 8994), 'scipy.optimize.Bounds', 'Bounds', (['lower_bound', 'upper_bound'], {}), '(lower_bound, upper_bound)\n', (8968, 8994), False, 'from scipy.optimize import Bounds\n'), ((11731, 11766), 'numpy.zeros', 'np.zeros', (['s.size'], {'dtype': 'numba.int32'}), '(s.size, dtype=numba.int32)\n', (11739, 11766), True, 'import numpy as np\n'), ((11778, 11810), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'numba.boolean'}), '(n, dtype=numba.boolean)\n', (11786, 11810), True, 'import numpy as np\n'), ((11969, 11984), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (11977, 11984), True, 'import numpy as np\n'), ((12199, 12223), 'numpy.argsort', 'np.argsort', (['x[10:10 + n]'], {}), '(x[10:10 + n])\n', (12209, 12223), True, 'import numpy as np\n'), ((12268, 12305), 'numpy.array', 'np.array', (['[(s + 1) for s in stations]'], {}), '([(s + 1) for s in stations])\n', (12276, 12305), True, 'import numpy as np\n'), ((12376, 12407), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'numba.int32'}), '(10, dtype=numba.int32)\n', (12384, 12407), True, 'import numpy as np\n'), ((13062, 13102), 'numpy.zeros', 'np.zeros', (['(TRAJECTORY_NUM, ASTEROID_NUM)'], {}), '((TRAJECTORY_NUM, 
ASTEROID_NUM))\n', (13070, 13102), True, 'import numpy as np\n'), ((13354, 13376), 'numpy.sum', 'np.sum', (['ast_dv'], {'axis': '(1)'}), '(ast_dv, axis=1)\n', (13360, 13376), True, 'import numpy as np\n'), ((13528, 13540), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (13536, 13540), True, 'import numpy as np\n'), ((13702, 13724), 'numpy.zeros', 'np.zeros', (['ASTEROID_NUM'], {}), '(ASTEROID_NUM)\n', (13710, 13724), True, 'import numpy as np\n'), ((13984, 13996), 'numpy.sum', 'np.sum', (['asts'], {}), '(asts)\n', (13990, 13996), True, 'import numpy as np\n'), ((4620, 4645), 'multiprocessing.RawValue', 'mp.RawValue', (['ct.c_long', '(0)'], {}), '(ct.c_long, 0)\n', (4631, 4645), True, 'import multiprocessing as mp\n'), ((4704, 4738), 'multiprocessing.RawValue', 'mp.RawValue', (['ct.c_double', 'math.inf'], {}), '(ct.c_double, math.inf)\n', (4715, 4738), True, 'import multiprocessing as mp\n'), ((4792, 4811), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4809, 4811), False, 'import sys, math, time\n'), ((6930, 6948), 'numpy.amin', 'np.amin', (['slot_mass'], {}), '(slot_mass)\n', (6937, 6948), True, 'import numpy as np\n'), ((8533, 8565), 'numpy.amax', 'np.amax', (["transfers['trajectory']"], {}), "(transfers['trajectory'])\n", (8540, 8565), True, 'import numpy as np\n'), ((8594, 8624), 'numpy.amax', 'np.amax', (["transfers['asteroid']"], {}), "(transfers['asteroid'])\n", (8601, 8624), True, 'import numpy as np\n'), ((9322, 9330), 'fcmaes.optimizer.logger', 'logger', ([], {}), '()\n', (9328, 9330), False, 'from fcmaes.optimizer import logger, Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Dual_annealing, Differential_evolution, Minimize\n'), ((6025, 6043), 'numpy.amin', 'np.amin', (['slot_mass'], {}), '(slot_mass)\n', (6032, 6043), True, 'import numpy as np\n'), ((7317, 7335), 'numpy.amin', 'np.amin', (['slot_mass'], {}), '(slot_mass)\n', (7324, 7335), True, 'import numpy as np\n'), ((6063, 6071), 'fcmaes.optimizer.logger', 'logger', ([], {}), '()\n', 
(6069, 6071), False, 'from fcmaes.optimizer import logger, Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Dual_annealing, Differential_evolution, Minimize\n'), ((6224, 6238), 'fcmaes.optimizer.dtime', 'dtime', (['self.t0'], {}), '(self.t0)\n', (6229, 6238), False, 'from fcmaes.optimizer import logger, Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Dual_annealing, Differential_evolution, Minimize\n'), ((7354, 7362), 'fcmaes.optimizer.logger', 'logger', ([], {}), '()\n', (7360, 7362), False, 'from fcmaes.optimizer import logger, Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Dual_annealing, Differential_evolution, Minimize\n'), ((7515, 7529), 'fcmaes.optimizer.dtime', 'dtime', (['self.t0'], {}), '(self.t0)\n', (7520, 7529), False, 'from fcmaes.optimizer import logger, Bite_cpp, Cma_cpp, De_cpp, de_cma, dtime, Dual_annealing, Differential_evolution, Minimize\n'), ((3112, 3145), 'math.sqrt', 'math.sqrt', (['(1 + to_add / WAIT_TIME)'], {}), '(1 + to_add / WAIT_TIME)\n', (3121, 3145), False, 'import sys, math, time\n'), ((3529, 3547), 'numpy.amin', 'np.amin', (['slot_mass'], {}), '(slot_mass)\n', (3536, 3547), True, 'import numpy as np\n')] |
from typing import Optional
import numpy as np
import scipy.special as sp
from arch.univariate.distribution import GeneralizedError as GE
from scipy.stats import gamma
from ._base import DistributionMixin, _format_simulator
class GeneralizedError(GE, DistributionMixin):
    """Generalized error distribution supporting pre-drawn custom uniforms.

    Falls back to the random-state simulator when no custom distribution is
    set; otherwise consumes 2*size values from self.custom_dist (first half
    as gamma quantile levels, second half as sign draws).
    """

    def __init__(self, random_state=None):
        DistributionMixin.__init__(self)
        GE.__init__(self, random_state)

    @_format_simulator
    def _simulator(self, size: int, reps: Optional[int] = None):
        """Draw standardized GED variates of the given size (optionally reps columns)."""
        _parameters = self._parameters
        if self.custom_dist is None:
            if reps is not None:
                size = size, reps
            nu, *_ = _parameters
            randoms = self._random_state.standard_gamma(1 / nu, size) ** (1.0 / nu)
            # random sign: maps {0, 1} draws to {-1, +1}
            randoms *= 2 * self._random_state.randint(0, 2, size) - 1
            scale = np.sqrt(sp.gamma(3.0 / nu) / sp.gamma(1.0 / nu))
            return randoms / scale
        else:
            self.derive_dist_size(size * 2)
            nu, *_ = _parameters
            randoms = gamma.ppf(self.custom_dist[:size], 1 / nu) ** (1. / nu)
            # fix: np.float was removed in NumPy 1.24 (it was an alias of the
            # builtin float), so use float directly - same resulting dtype.
            randoms *= 2 * np.asarray(self.custom_dist[size:2 * size] > 0.5, float) - 1
            scale = np.sqrt(sp.gamma(3.0 / nu) / sp.gamma(1.0 / nu))
            self.custom_dist = None  # reset simulator
            return randoms / scale
| [
"arch.univariate.distribution.GeneralizedError.__init__",
"numpy.asarray",
"scipy.special.gamma",
"scipy.stats.gamma.ppf"
] | [((367, 398), 'arch.univariate.distribution.GeneralizedError.__init__', 'GE.__init__', (['self', 'random_state'], {}), '(self, random_state)\n', (378, 398), True, 'from arch.univariate.distribution import GeneralizedError as GE\n'), ((1039, 1081), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['self.custom_dist[:size]', '(1 / nu)'], {}), '(self.custom_dist[:size], 1 / nu)\n', (1048, 1081), False, 'from scipy.stats import gamma\n'), ((847, 865), 'scipy.special.gamma', 'sp.gamma', (['(3.0 / nu)'], {}), '(3.0 / nu)\n', (855, 865), True, 'import scipy.special as sp\n'), ((868, 886), 'scipy.special.gamma', 'sp.gamma', (['(1.0 / nu)'], {}), '(1.0 / nu)\n', (876, 886), True, 'import scipy.special as sp\n'), ((1122, 1181), 'numpy.asarray', 'np.asarray', (['(self.custom_dist[size:2 * size] > 0.5)', 'np.float'], {}), '(self.custom_dist[size:2 * size] > 0.5, np.float)\n', (1132, 1181), True, 'import numpy as np\n'), ((1214, 1232), 'scipy.special.gamma', 'sp.gamma', (['(3.0 / nu)'], {}), '(3.0 / nu)\n', (1222, 1232), True, 'import scipy.special as sp\n'), ((1235, 1253), 'scipy.special.gamma', 'sp.gamma', (['(1.0 / nu)'], {}), '(1.0 / nu)\n', (1243, 1253), True, 'import scipy.special as sp\n')] |
# encoding: utf-8
import os
import argparse
import pathlib
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
"""
Note that the modules (numpy, maplotlib, wave, scipy) are properly installed on your environment.
Plot wave, spectrum, save them as pdf and png at same directory.
Example:
python calc_wave_analysis.py IR_test.wav
"""
# global matplotlib style: Japanese-capable font, inward ticks on all four
# sides, fixed figure size/dpi and subplot spacing for the saved png
plt.rcParams['font.family'] = 'IPAPGothic'
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['xtick.major.width'] = 1.0
plt.rcParams['ytick.major.width'] = 1.0
plt.rcParams['font.size'] = 11
plt.rcParams['axes.linewidth'] = 1.0
plt.rcParams['figure.figsize'] = (8, 7)
plt.rcParams['figure.dpi'] = 300
plt.rcParams['figure.subplot.hspace'] = 0.3
plt.rcParams['figure.subplot.wspace'] = 0.3
def main():
    """Read a headerless csv and save a plot of its 4th column (MSE) as png.

    The csv is expected to hold, per row: desired signal, filter output,
    error and pre-computed MSE; only the MSE column is plotted here.
    """
    parser = argparse.ArgumentParser(description="This script plots graph from a csv file with 3 columns.")
    parser.add_argument('csv_path',
                        action='store',
                        nargs=None,
                        const=None,
                        default=None,
                        type=str,
                        help='Directory path where the csv file is located.',
                        metavar=None)
    parser.add_argument('-d', '--dst_path',
                        action='store',
                        nargs='?',
                        const="/Users/tetsu/personal_files/Research/filters/test/img",
                        default=".",
                        type=str,
                        help='Directory path where you want to locate png files. (default: current directory)',
                        metavar=None)
    args = parser.parse_args()
    input_name = pathlib.Path(args.csv_path)
    df = pd.read_csv(input_name, header=None)
    print("analize file name: ", input_name)
    # only column 3 (the pre-computed MSE) is needed; the unused unpacking of
    # columns 0-2 and the dead commented-out MSE recomputation were removed
    mse = df[3]
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    ax1.set_ylabel("MSE [dB]")
    ax1.set_xlabel("Sample")
    ax1.plot(mse, "y-", alpha=1.0)
    plt.grid()
    # output name: "<input stem>_conv.png" inside the destination directory
    output_dir = pathlib.Path(args.dst_path)
    input_name = pathlib.Path(str(input_name.stem) + "_conv")
    output_name = pathlib.Path(input_name.name).with_suffix(".png")
    output_path = pathlib.Path.joinpath(output_dir, output_name)
    plt.savefig(output_path)
    print("\nfilterd data plot is saved at: ", output_path, "\n")
def MSE(y_list, x_list=None):
    """Mean squared error between y_list and x_list.

    If x_list is omitted, the error is taken against an all-zero reference
    of the same length.
    """
    if x_list is None:
        x_list = np.zeros(len(y_list))
    diff = np.array(y_list) - np.array(x_list)
    squared = diff ** 2
    return np.sum(squared) / squared.size
# %%
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"argparse.ArgumentParser",
"matplotlib.use",
"pathlib.Path",
"pathlib.Path.joinpath",
"numpy.array",
"matplotlib.pyplot.figure"
] | [((130, 151), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (144, 151), False, 'import matplotlib\n'), ((967, 1066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This script plots graph from a csv file with 3 columns."""'}), "(description=\n 'This script plots graph from a csv file with 3 columns.')\n", (990, 1066), False, 'import argparse\n'), ((2370, 2394), 'pathlib.Path', 'pathlib.Path', (['input_name'], {}), '(input_name)\n', (2382, 2394), False, 'import pathlib\n'), ((2407, 2443), 'pandas.read_csv', 'pd.read_csv', (['input_name'], {'header': 'None'}), '(input_name, header=None)\n', (2418, 2443), True, 'import pandas as pd\n'), ((2552, 2564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2562, 2564), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3173), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3171, 3173), True, 'import matplotlib.pyplot as plt\n'), ((3194, 3221), 'pathlib.Path', 'pathlib.Path', (['args.dst_path'], {}), '(args.dst_path)\n', (3206, 3221), False, 'import pathlib\n'), ((3373, 3419), 'pathlib.Path.joinpath', 'pathlib.Path.joinpath', (['output_dir', 'output_name'], {}), '(output_dir, output_name)\n', (3394, 3419), False, 'import pathlib\n'), ((3425, 3449), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {}), '(output_path)\n', (3436, 3449), True, 'import matplotlib.pyplot as plt\n'), ((3630, 3646), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (3638, 3646), True, 'import numpy as np\n'), ((3661, 3677), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (3669, 3677), True, 'import numpy as np\n'), ((3304, 3333), 'pathlib.Path', 'pathlib.Path', (['input_name.name'], {}), '(input_name.name)\n', (3316, 3333), False, 'import pathlib\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Note: this file is a demo version of pre_process_sysu.py, to prepare a demo dataset(save as .npy file) with
a small number of identities for debugging neural network.
"""
import os
import numpy as np
from PIL import Image
# todo_change your own path
data_path = "./data/SYSU-MM01"
rgb_cameras = ['cam1', 'cam2', 'cam4', 'cam5']
ir_cameras = ['cam3', 'cam6']

# Read the training identity list: a single comma-separated line of ints,
# zero-padded to 4 digits to match the per-camera directory names.
file_path_train = os.path.join(data_path, 'exp/train_id_demo.txt')
with open(file_path_train, 'r') as file:
    ids = [int(token) for token in file.read().splitlines()[0].split(',')]
    id_train = ["%04d" % x for x in ids]

# Collect every image path for the selected identities, split by modality.
files_rgb = []
files_ir = []
for identity in sorted(id_train):
    for cam in rgb_cameras:
        cam_dir = os.path.join(data_path, cam, identity)
        if os.path.isdir(cam_dir):
            files_rgb.extend(sorted(cam_dir + '/' + name for name in os.listdir(cam_dir)))
    for cam in ir_cameras:
        cam_dir = os.path.join(data_path, cam, identity)
        if os.path.isdir(cam_dir):
            files_ir.extend(sorted(cam_dir + '/' + name for name in os.listdir(cam_dir)))

# Relabel: map each raw person id (characters [-13:-9] of the path) to a
# contiguous 0..K-1 label index.
pid_container = set()
for img_path in files_ir:
    pid_container.add(int(img_path[-13:-9]))
pid2label = {pid: label for label, pid in enumerate(pid_container)}

# Target size every image is resized to before saving.
fix_image_width = 144
fix_image_height = 288
def read_imgs(train_image):
    """Load, resize and label a list of image files.

    Parameters
    ----------
    train_image : list of str
        Image file paths. Characters ``[-13:-9]`` of each path encode the
        raw person id, which is remapped through the module-level
        ``pid2label`` dict.

    Returns
    -------
    tuple of np.ndarray
        ``(images, labels)`` — images resized to
        ``(fix_image_width, fix_image_height)``, labels as contiguous ints.
    """
    train_data = []
    labels = []
    for ipath in train_image:
        # img
        img = Image.open(ipath)
        img = img.resize((fix_image_width, fix_image_height), Image.ANTIALIAS)
        pix_array = np.array(img)
        train_data.append(pix_array)
        # label: raw person id parsed from the path, remapped to 0..K-1
        pid_label = int(ipath[-13:-9])
        pid_label = pid2label[pid_label]
        labels.append(pid_label)
    # BUG FIX: the original returned the module-level names train_img /
    # train_label (undefined at first call) instead of the lists built here.
    return np.array(train_data), np.array(labels)
# Convert each modality and persist images/labels as .npy next to the data.
for files, modality in ((files_rgb, "rgb"), (files_ir, "ir")):
    train_img, train_label = read_imgs(files)
    np.save(os.path.join(data_path, f'demo_train_{modality}_resized_img.npy'), train_img)
    np.save(os.path.join(data_path, f'demo_train_{modality}_resized_label.npy'), train_label)
| [
"os.listdir",
"PIL.Image.open",
"os.path.join",
"numpy.array",
"os.path.isdir"
] | [((1067, 1115), 'os.path.join', 'os.path.join', (['data_path', '"""exp/train_id_demo.txt"""'], {}), "(data_path, 'exp/train_id_demo.txt')\n", (1079, 1115), False, 'import os\n'), ((2657, 2714), 'os.path.join', 'os.path.join', (['data_path', '"""demo_train_rgb_resized_img.npy"""'], {}), "(data_path, 'demo_train_rgb_resized_img.npy')\n", (2669, 2714), False, 'import os\n'), ((2735, 2794), 'os.path.join', 'os.path.join', (['data_path', '"""demo_train_rgb_resized_label.npy"""'], {}), "(data_path, 'demo_train_rgb_resized_label.npy')\n", (2747, 2794), False, 'import os\n'), ((2874, 2930), 'os.path.join', 'os.path.join', (['data_path', '"""demo_train_ir_resized_img.npy"""'], {}), "(data_path, 'demo_train_ir_resized_img.npy')\n", (2886, 2930), False, 'import os\n'), ((2951, 3009), 'os.path.join', 'os.path.join', (['data_path', '"""demo_train_ir_resized_label.npy"""'], {}), "(data_path, 'demo_train_ir_resized_label.npy')\n", (2963, 3009), False, 'import os\n'), ((1385, 1418), 'os.path.join', 'os.path.join', (['data_path', 'cam', 'ide'], {}), '(data_path, cam, ide)\n', (1397, 1418), False, 'import os\n'), ((1430, 1452), 'os.path.isdir', 'os.path.isdir', (['img_dir'], {}), '(img_dir)\n', (1443, 1452), False, 'import os\n'), ((1621, 1654), 'os.path.join', 'os.path.join', (['data_path', 'cam', 'ide'], {}), '(data_path, cam, ide)\n', (1633, 1654), False, 'import os\n'), ((1666, 1688), 'os.path.isdir', 'os.path.isdir', (['img_dir'], {}), '(img_dir)\n', (1679, 1688), False, 'import os\n'), ((2236, 2253), 'PIL.Image.open', 'Image.open', (['ipath'], {}), '(ipath)\n', (2246, 2253), False, 'from PIL import Image\n'), ((2353, 2366), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2361, 2366), True, 'import numpy as np\n'), ((2546, 2565), 'numpy.array', 'np.array', (['train_img'], {}), '(train_img)\n', (2554, 2565), True, 'import numpy as np\n'), ((2567, 2588), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (2575, 2588), True, 'import numpy as np\n'), 
((1513, 1532), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (1523, 1532), False, 'import os\n'), ((1749, 1768), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (1759, 1768), False, 'import os\n')] |
import os
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from PIL import Image, ImageOps
import numpy as np
from tqdm import tqdm
import argparse
import shutil
# input path
input_path = '/home/tomohiro/code/tcav/tcav/dataset/for_tcav/-mnist-'

# Base RGB colour assigned to each colourised class (insertion order matters:
# the k-th colour pairs with class index 5 + k below).
color_lst = {
    'blue': [0, 0, 255],
    'yellow': [255, 255, 0],
    'red': [255, 0, 0],
    'purple': [128, 0, 128],
    'green': [0, 128, 0],
}

# Std-dev of the Gaussian noise added independently to each RGB channel.
noise_std = 20

# FIX: the original iterated `enumerate(color_lst)` and mutated the loop
# variable with `i += 5`; `start=5` expresses the same class offset directly.
for i, color in enumerate(color_lst, start=5):
    files = os.listdir(input_path + str(i))
    # Output directory: the colour name is spliced in before the '-mnist-'
    # suffix of the input path (last 7 characters).
    out_dir = input_path[:-7] + color + input_path[-7:] + str(i)
    print(out_dir)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for f in files:
        img = Image.open(input_path + str(i) + '/' + f).convert('L')
        # Jitter the base colour per image, then clamp to the valid [0, 255].
        rgb = color_lst[color] + np.random.normal(0, noise_std, 3)
        rgb = np.where(rgb <= 255, rgb, 255)
        rgb = np.where(rgb >= 0, rgb, 0)
        color_img = ImageOps.colorize(img, black=(0, 0, 0), white=rgb)
        color_img.save(out_dir + '/' + f)
| [
"numpy.where",
"PIL.ImageOps.colorize",
"numpy.random.normal"
] | [((993, 1023), 'numpy.where', 'np.where', (['(rgb <= 255)', 'rgb', '(255)'], {}), '(rgb <= 255, rgb, 255)\n', (1001, 1023), True, 'import numpy as np\n'), ((1038, 1064), 'numpy.where', 'np.where', (['(rgb >= 0)', 'rgb', '(0)'], {}), '(rgb >= 0, rgb, 0)\n', (1046, 1064), True, 'import numpy as np\n'), ((1085, 1135), 'PIL.ImageOps.colorize', 'ImageOps.colorize', (['img'], {'black': '(0, 0, 0)', 'white': 'rgb'}), '(img, black=(0, 0, 0), white=rgb)\n', (1102, 1135), False, 'from PIL import Image, ImageOps\n'), ((945, 978), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_std', '(3)'], {}), '(0, noise_std, 3)\n', (961, 978), True, 'import numpy as np\n')] |
from scvelo.plotting.docs import doc_scatter, doc_params
from scvelo.plotting.utils import *
from inspect import signature
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
@doc_params(scatter=doc_scatter)
def scatter(
    adata=None,
    basis=None,
    x=None,
    y=None,
    vkey=None,
    color=None,
    use_raw=None,
    layer=None,
    color_map=None,
    colorbar=None,
    palette=None,
    size=None,
    alpha=None,
    linewidth=None,
    linecolor=None,
    perc=None,
    groups=None,
    sort_order=True,
    components=None,
    projection=None,
    legend_loc=None,
    legend_loc_lines=None,
    legend_fontsize=None,
    legend_fontweight=None,
    legend_fontoutline=None,
    xlabel=None,
    ylabel=None,
    title=None,
    fontsize=None,
    figsize=None,
    xlim=None,
    ylim=None,
    add_density=None,
    add_assignments=None,
    add_linfit=None,
    add_polyfit=None,
    add_rug=None,
    add_text=None,
    add_text_pos=None,
    add_quiver=None,
    quiver_size=None,
    add_outline=None,
    outline_width=None,
    outline_color=None,
    n_convolve=None,
    smooth=None,
    rescale_color=None,
    color_gradients=None,
    dpi=None,
    frameon=None,
    zorder=None,
    ncols=None,
    nrows=None,
    wspace=None,
    hspace=None,
    show=None,
    save=None,
    ax=None,
    **kwargs,
):
    """\
    Scatter plot along observations or variables axes.

    Dispatches to one of several modes: a grid of panels (any list-valued
    key), multiple curves in one axis (comma-separated `y`/`layer`),
    colour-gradient overlays (`color_gradients`), or a single scatter
    (embedding, phase portrait, or gene trend).

    Arguments
    ---------
    adata: :class:`~anndata.AnnData`
        Annotated data matrix.
    x: `str`, `np.ndarray` or `None` (default: `None`)
        x coordinate
    y: `str`, `np.ndarray` or `None` (default: `None`)
        y coordinate
    {scatter}

    Returns
    -------
    If `show==False` a `matplotlib.Axis`
    """
    if adata is None and (x is not None and y is not None):
        adata = AnnData(np.stack([x, y]).T)

    # restore old conventions
    add_assignments = kwargs.pop("show_assignments", add_assignments)
    add_linfit = kwargs.pop("show_linear_fit", add_linfit)
    add_polyfit = kwargs.pop("show_polyfit", add_polyfit)
    add_density = kwargs.pop("show_density", add_density)
    add_rug = kwargs.pop("rug", add_rug)
    basis = kwargs.pop("var_names", basis)

    # keys for figures (fkeys) and multiple plots (mkeys)
    fkeys = ["adata", "show", "save", "groups", "ncols", "nrows", "wspace", "hspace"]
    fkeys += ["ax", "kwargs"]
    mkeys = ["color", "layer", "basis", "components", "x", "y", "xlabel", "ylabel"]
    mkeys += ["title", "color_map", "add_text"]
    scatter_kwargs = {"show": False, "save": False}
    for key in signature(scatter).parameters:
        if key not in mkeys + fkeys:
            scatter_kwargs[key] = eval(key)
    mkwargs = {}
    for key in mkeys:  # mkwargs[key] = key for key in mkeys
        mkwargs[key] = eval("{0}[0] if is_list({0}) else {0}".format(key))

    # use c & color and cmap & color_map interchangeably,
    # and plot each group separately if groups is 'all'
    if "c" in kwargs:
        color = kwargs.pop("c")
    if "cmap" in kwargs:
        color_map = kwargs.pop("cmap")
    if "rasterized" not in kwargs:
        kwargs["rasterized"] = settings._vector_friendly
    if isinstance(color_map, (list, tuple)) and all(
        [is_color_like(c) or c == "transparent" for c in color_map]
    ):
        color_map = rgb_custom_colormap(colors=color_map)
    if isinstance(groups, str) and groups == "all":
        if color is None:
            color = default_color(adata)
        if is_categorical(adata, color):
            vc = adata.obs[color].value_counts()
            groups = [[c] for c in vc[vc > 0].index]
    if isinstance(add_text, (list, tuple, np.ndarray, np.record)):
        add_text = list(np.array(add_text, dtype=str))

    # create list of each mkey and check if all bases are valid.
    color, layer, components = to_list(color), to_list(layer), to_list(components)
    x, y, basis = to_list(x), to_list(y), to_valid_bases_list(adata, basis)

    # get multikey (with more than one element)
    multikeys = eval(f"[{','.join(mkeys)}]")
    if is_list_of_list(groups):
        multikeys.append(groups)
    key_lengths = np.array([len(key) if is_list(key) else 1 for key in multikeys])
    multikey = (
        multikeys[np.where(key_lengths > 1)[0][0]] if np.max(key_lengths) > 1 else None
    )

    # gridspec frame for plotting multiple keys (mkeys: list or tuple)
    if multikey is not None:
        if np.sum(key_lengths > 1) == 1 and is_list_of_str(multikey):
            multikey = unique(multikey)  # take unique set if no more than one multikey
        if len(multikey) > 20:
            raise ValueError("Please restrict the passed list to max 20 elements.")
        if ax is not None:
            logg.warn("Cannot specify `ax` when plotting multiple panels.")
        if is_list(title):
            title *= int(np.ceil(len(multikey) / len(title)))
        if nrows is None:
            ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
            nrows = int(np.ceil(len(multikey) / ncols))
        else:
            ncols = int(np.ceil(len(multikey) / nrows))
        if not frameon:
            lloc, llines = "legend_loc", "legend_loc_lines"
            if lloc in scatter_kwargs and scatter_kwargs[lloc] is None:
                scatter_kwargs[lloc] = "none"
            if llines in scatter_kwargs and scatter_kwargs[llines] is None:
                scatter_kwargs[llines] = "none"

        grid_figsize, dpi = get_figure_params(figsize, dpi, ncols)
        grid_figsize = (grid_figsize[0] * ncols, grid_figsize[1] * nrows)
        fig = pl.figure(None, grid_figsize, dpi=dpi)
        hspace = 0.3 if hspace is None else hspace
        gspec = pl.GridSpec(nrows, ncols, fig, hspace=hspace, wspace=wspace)

        # recurse: one scatter() call per grid cell / multikey element
        ax = []
        for i, gs in enumerate(gspec):
            if i < len(multikey):
                g = groups[i * (len(groups) > i)] if is_list_of_list(groups) else groups
                multi_kwargs = {"groups": g}
                for key in mkeys:  # multi_kwargs[key] = key[i] if is multikey else key
                    multi_kwargs[key] = eval(
                        "{0}[i * (len({0}) > i)] if is_list({0}) else {0}".format(key)
                    )
                ax.append(
                    scatter(
                        adata,
                        ax=pl.subplot(gs),
                        **multi_kwargs,
                        **scatter_kwargs,
                        **kwargs,
                    )
                )

        if not frameon and isinstance(ylabel, str):
            set_label(xlabel, ylabel, fontsize, ax=ax[0], fontweight="bold")
        savefig_or_show(dpi=dpi, save=save, show=show)
        if show is False:
            return ax

    else:
        # make sure that there are no more lists, e.g. ['clusters'] becomes 'clusters'
        color_map = to_val(color_map)
        color, layer, basis = to_val(color), to_val(layer), to_val(basis)
        x, y, components = to_val(x), to_val(y), to_val(components)
        xlabel, ylabel, title = to_val(xlabel), to_val(ylabel), to_val(title)

        # multiple plots within one ax for comma-separated y or layers (string).
        if any([isinstance(key, str) and "," in key for key in [y, layer]]):
            # comma split
            y, layer, color = [
                [k.strip() for k in key.split(",")]
                if isinstance(key, str) and "," in key
                else to_list(key)
                for key in [y, layer, color]
            ]
            multikey = y if len(y) > 1 else layer if len(layer) > 1 else None
            if multikey is not None:
                for i, mi in enumerate(multikey):
                    ax = scatter(
                        adata,
                        x=x,
                        y=y[i * (len(y) > i)],
                        color=color[i * (len(color) > i)],
                        layer=layer[i * (len(layer) > i)],
                        basis=basis,
                        components=components,
                        groups=groups,
                        xlabel=xlabel,
                        ylabel="expression" if ylabel is None else ylabel,
                        color_map=color_map,
                        title=y[i * (len(y) > i)] if title is None else title,
                        ax=ax,
                        **scatter_kwargs,
                    )
                if legend_loc is None:
                    legend_loc = "best"
                if legend_loc and legend_loc != "none":
                    multikey = [key.replace("Ms", "spliced") for key in multikey]
                    multikey = [key.replace("Mu", "unspliced") for key in multikey]
                    ax.legend(multikey, fontsize=legend_fontsize, loc=legend_loc)

            savefig_or_show(dpi=dpi, save=save, show=show)
            if show is False:
                return ax

        elif color_gradients is not None and color_gradients is not False:
            vals, names, color, scatter_kwargs = gets_vals_from_color_gradients(
                adata, color, **scatter_kwargs
            )
            cols = zip(adata.obs[color].cat.categories, adata.uns[f"{color}_colors"])
            c_colors = {cat: col for (cat, col) in cols}
            mkwargs.pop("color")
            ax = scatter(
                adata,
                color="grey",
                ax=ax,
                **mkwargs,
                **get_kwargs(scatter_kwargs, {"alpha": 0.05}),
            )  # background
            ax = scatter(
                adata,
                color=color,
                ax=ax,
                **mkwargs,
                **get_kwargs(scatter_kwargs, {"s": 0}),
            )  # set legend
            sorted_idx = np.argsort(vals, 1)[:, ::-1][:, :2]
            for id0 in range(len(names)):
                for id1 in range(id0 + 1, len(names)):
                    cmap = rgb_custom_colormap(
                        [c_colors[names[id0]], "white", c_colors[names[id1]]],
                        alpha=[1, 0, 1],
                    )
                    mkwargs.update({"color_map": cmap})
                    c_vals = np.array(vals[:, id1] - vals[:, id0]).flatten()
                    c_bool = np.array([id0 in c and id1 in c for c in sorted_idx])
                    if np.sum(c_bool) > 1:
                        _adata = adata[c_bool] if np.sum(~c_bool) > 0 else adata
                        mkwargs["color"] = c_vals[c_bool]
                        ax = scatter(
                            _adata, ax=ax, **mkwargs, **scatter_kwargs, **kwargs
                        )

            savefig_or_show(dpi=dpi, save=save, show=show)
            if show is False:
                return ax

        # actual scatter plot
        else:

            # set color, color_map, edgecolor, basis, linewidth, frameon, use_raw
            if color is None:
                color = default_color(adata, add_outline)
            if "cmap" not in kwargs:
                kwargs["cmap"] = (
                    default_color_map(adata, color) if color_map is None else color_map
                )
            if "s" not in kwargs:
                kwargs["s"] = default_size(adata) if size is None else size
            if "edgecolor" not in kwargs:
                kwargs["edgecolor"] = "none"
            is_embedding = ((x is None) | (y is None)) and basis not in adata.var_names
            if basis is None and is_embedding:
                basis = default_basis(adata)
            if linewidth is None:
                linewidth = 1
            if linecolor is None:
                linecolor = "k"
            if frameon is None:
                frameon = True if not is_embedding else settings._frameon
            if isinstance(groups, str):
                groups = [groups]
            if use_raw is None and basis not in adata.var_names:
                use_raw = layer is None and adata.raw is not None

            if projection == "3d":
                from mpl_toolkits.mplot3d import Axes3D

            ax, show = get_ax(ax, show, figsize, dpi, projection)

            # phase portrait: get x and y from .layers (e.g. spliced vs. unspliced)
            # NOTE(Haotian): true phase portrait plot here
            if basis in adata.var_names:
                if title is None:
                    title = basis
                if x is None and y is None:
                    x = default_xkey(adata, use_raw=use_raw)
                    y = default_ykey(adata, use_raw=use_raw)
                elif x is None or y is None:
                    raise ValueError("Both x and y have to specified.")
                if isinstance(x, str) and isinstance(y, str):
                    layers_keys = list(adata.layers.keys()) + ["X"]
                    if any([key not in layers_keys for key in [x, y]]):
                        raise ValueError("Could not find x or y in layers.")

                    if xlabel is None:
                        xlabel = x
                    if ylabel is None:
                        ylabel = y
                    # NOTE(Haotian): the data to plot is retrieved here
                    x = get_obs_vector(adata, basis, layer=x, use_raw=use_raw)
                    y = get_obs_vector(adata, basis, layer=y, use_raw=use_raw)

                if legend_loc is None:
                    legend_loc = "none"

                if use_raw and perc is not None:
                    ub = np.percentile(x, 99.9 if not isinstance(perc, int) else perc)
                    ax.set_xlim(right=ub * 1.05)
                    ub = np.percentile(y, 99.9 if not isinstance(perc, int) else perc)
                    ax.set_ylim(top=ub * 1.05)

                # velocity model fits (full dynamics and steady-state ratios)
                if any(["gamma" in key or "alpha" in key for key in adata.var.keys()]):
                    plot_velocity_fits(
                        adata,
                        basis,
                        vkey,
                        use_raw,
                        linewidth,
                        linecolor,
                        legend_loc_lines,
                        legend_fontsize,
                        add_assignments,
                        ax=ax,
                    )

            # embedding: set x and y to embedding coordinates
            elif is_embedding:
                X_emb = adata.obsm[f"X_{basis}"][:, get_components(components, basis)]
                x, y = X_emb[:, 0], X_emb[:, 1]
                # todo: 3d plotting
                # z = X_emb[:, 2] if projection == "3d" and X_emb.shape[1] > 2 else None

            elif isinstance(x, str) and isinstance(y, str):
                var_names = (
                    adata.raw.var_names
                    if use_raw and adata.raw is not None
                    else adata.var_names
                )
                if layer is None:
                    layer = default_xkey(adata, use_raw=use_raw)
                x_keys = list(adata.obs.keys()) + list(adata.layers.keys())
                is_timeseries = y in var_names and x in x_keys
                if xlabel is None:
                    xlabel = x
                if ylabel is None:
                    ylabel = layer if is_timeseries else y
                if title is None:
                    title = y if is_timeseries else color
                if legend_loc is None:
                    legend_loc = "none"

                # gene trend: x and y as gene along obs/layers (e.g. pseudotime)
                if is_timeseries:
                    x = (
                        adata.obs[x]
                        if x in adata.obs.keys()
                        else adata.obs_vector(y, layer=x)
                    )
                    y = get_obs_vector(adata, basis=y, layer=layer, use_raw=use_raw)
                # get x and y from var_names, var or obs
                else:
                    if x in var_names and y in var_names:
                        if layer in adata.layers.keys():
                            x = adata.obs_vector(x, layer=layer)
                            y = adata.obs_vector(y, layer=layer)
                        else:
                            data = adata.raw if use_raw else adata
                            x, y = data.obs_vector(x), data.obs_vector(y)
                    elif x in adata.var.keys() and y in adata.var.keys():
                        x, y = adata.var[x], adata.var[y]
                    elif x in adata.obs.keys() and y in adata.obs.keys():
                        x, y = adata.obs[x], adata.obs[y]
                    elif np.any(
                        [var_key in x or var_key in y for var_key in adata.var.keys()]
                    ):
                        var_keys = [
                            k
                            for k in adata.var.keys()
                            if not isinstance(adata.var[k][0], str)
                        ]
                        var = adata.var[var_keys]
                        x = var.astype(np.float32).eval(x)
                        y = var.astype(np.float32).eval(y)
                    elif np.any(
                        [obs_key in x or obs_key in y for obs_key in adata.obs.keys()]
                    ):
                        obs_keys = [
                            k
                            for k in adata.obs.keys()
                            if not isinstance(adata.obs[k][0], str)
                        ]
                        obs = adata.obs[obs_keys]
                        x = obs.astype(np.float32).eval(x)
                        y = obs.astype(np.float32).eval(y)
                    else:
                        raise ValueError(
                            "x or y is invalid! pass valid observation or a gene name"
                        )

            x, y = make_dense(x).flatten(), make_dense(y).flatten()

            # convolve along x axes (e.g. pseudotime)
            if n_convolve is not None:
                vec_conv = np.ones(n_convolve) / n_convolve
                y[np.argsort(x)] = np.convolve(y[np.argsort(x)], vec_conv, mode="same")

            # if color is set to a cell index, plot that cell on top
            if is_int(color) or is_list_of_int(color) and len(color) != len(x):
                color = np.array(np.isin(np.arange(len(x)), color), dtype=bool)
                size = kwargs["s"] * 2 if np.sum(color) == 1 else kwargs["s"]
                if zorder is None:
                    zorder = 10
                ax.scatter(
                    np.ravel(x[color]),
                    np.ravel(y[color]),
                    s=size,
                    zorder=zorder,
                    color=palette[-1] if palette is not None else "darkblue",
                )
                color = (
                    palette[0] if palette is not None and len(palette) > 1 else "gold"
                )
                zorder -= 1

            # if color is in {'ascending', 'descending'}
            elif isinstance(color, str):
                if color == "ascending":
                    color = np.linspace(0, 1, len(x))
                elif color == "descending":
                    color = np.linspace(1, 0, len(x))

            # set palette if categorical color vals
            if is_categorical(adata, color):
                set_colors_for_categorical_obs(adata, color, palette)

            # set color
            if (
                basis in adata.var_names
                and isinstance(color, str)
                and color in adata.layers.keys()
            ):
                # phase portrait: color=basis, layer=color
                c = interpret_colorkey(adata, basis, color, perc, use_raw)
            else:
                # embedding, gene trend etc.
                c = interpret_colorkey(adata, color, layer, perc, use_raw)

            if c is not None and not isinstance(c, str) and not isinstance(c[0], str):
                # smooth color values across neighbors and rescale
                if smooth and len(c) == adata.n_obs:
                    n_neighbors = None if isinstance(smooth, bool) else smooth
                    c = get_connectivities(adata, n_neighbors=n_neighbors).dot(c)

                # rescale color values to min and max acc. to rescale_color tuple
                if rescale_color is not None:
                    try:
                        c += rescale_color[0] - np.nanmin(c)
                        c *= rescale_color[1] / np.nanmax(c)
                    # FIX: was a bare `except:`, which would also swallow
                    # KeyboardInterrupt / SystemExit.
                    except Exception:
                        logg.warn("Could not rescale colors. Pass a tuple, e.g. [0,1].")

            # set vmid to 0 if color values obtained from velocity expression
            if not np.any([v in kwargs for v in ["vmin", "vmid", "vmax"]]) and np.any(
                [
                    isinstance(v, str)
                    and "time" not in v
                    and (v.endswith("velocity") or v.endswith("transition"))
                    for v in [color, layer]
                ]
            ):
                kwargs["vmid"] = 0

            # introduce vmid by setting vmin and vmax accordingly
            if "vmid" in kwargs:
                vmid = kwargs.pop("vmid")
                if vmid is not None:
                    if not (isinstance(c, str) or isinstance(c[0], str)):
                        lb, ub = np.min(c), np.max(c)
                        crange = max(np.abs(vmid - lb), np.abs(ub - vmid))
                        kwargs.update({"vmin": vmid - crange, "vmax": vmid + crange})

            x, y = np.ravel(x), np.ravel(y)
            if len(x) != len(y):
                raise ValueError("x or y do not share the same dimension.")

            if not isinstance(c, str):
                c = np.ravel(c) if len(np.ravel(c)) == len(x) else c
                if len(c) != len(x):
                    c = "grey"
                    if not isinstance(color, str) or color != default_color(adata):
                        logg.warn("Invalid color key. Using grey instead.")

            # store original order of color values
            # NOTE(Haotian): actual data to plot
            color_array, scatter_array = c, np.stack([x, y]).T

            # set color to grey for NAN values and for cells that are not in groups
            if (
                groups is not None
                or is_categorical(adata, color)
                and np.any(pd.isnull(adata.obs[color]))
            ):
                if isinstance(groups, (list, tuple, np.record)):
                    groups = unique(groups)
                zorder = 0 if zorder is None else zorder
                pop_keys = ["groups", "add_linfit", "add_polyfit", "add_density"]
                _ = [scatter_kwargs.pop(key, None) for key in pop_keys]
                ax = scatter(
                    adata,
                    x=x,
                    y=y,
                    basis=basis,
                    layer=layer,
                    color="lightgrey",
                    ax=ax,
                    **scatter_kwargs,
                )
                if groups is not None and len(groups) == 1:
                    if (
                        isinstance(groups[0], str)
                        and groups[0] in adata.var.keys()
                        and basis in adata.var_names
                    ):
                        groups = f"{adata[:, basis].var[groups[0]][0]}"
                idx = groups_to_bool(adata, groups, color)
                if idx is not None:
                    if np.sum(idx) > 0:  # if any group to be highlighted
                        x, y = x[idx], y[idx]
                        if not isinstance(c, str) and len(c) == adata.n_obs:
                            c = c[idx]
                        if isinstance(kwargs["s"], np.ndarray):
                            kwargs["s"] = np.array(kwargs["s"])[idx]
                        if (
                            title is None
                            and groups is not None
                            and len(groups) == 1
                            and isinstance(groups[0], str)
                        ):
                            title = groups[0]
                    else:  # if nothing to be highlighted
                        add_linfit, add_polyfit, add_density = None, None, None

            # check if higher value points should be plotted on top
            if not isinstance(c, str) and len(c) == len(x):
                order = None
                if sort_order and not is_categorical(adata, color):
                    order = np.argsort(c)
                elif not sort_order and is_categorical(adata, color):
                    counts = get_value_counts(adata, color)
                    np.random.seed(0)
                    nums, p = np.arange(0, len(x)), counts / np.sum(counts)
                    order = np.random.choice(nums, len(x), replace=False, p=p)
                if order is not None:
                    x, y, c = x[order], y[order], c[order]
                    if isinstance(kwargs["s"], np.ndarray):  # sort sizes if array-type
                        kwargs["s"] = np.array(kwargs["s"])[order]

            # check if plot quivers
            if add_quiver:
                quiver_kwargs = {
                    "scale": quiver_size if quiver_size else 1,
                    "cmap": kwargs["cmap"],
                    "angles": "xy",
                    "scale_units": "xy",
                    "edgecolors": "k",
                    "linewidth": 0.1,
                    "width": None,
                }
                vs = get_obs_vector(adata, basis, layer='velocity', use_raw=use_raw)
                vu = np.zeros_like(vs)
                # make dense().flatten()
                # ravel
                if is_color_like(c[0]):
                    ax.quiver(x, y, vs, vu, color=c, **quiver_kwargs)
                else:
                    ax.quiver(x, y, vs, vu, c, **quiver_kwargs)

            # NOTE(Haotian): the actual scatter
            smp = ax.scatter(
                x, y, c=c, alpha=alpha, marker=".", zorder=zorder, **kwargs
            )

            outline_dtypes = (list, tuple, np.ndarray, int, np.int_, str)
            if isinstance(add_outline, outline_dtypes) or add_outline:
                if isinstance(add_outline, (list, tuple, np.record)):
                    add_outline = unique(add_outline)
                if (
                    add_outline is not True
                    and isinstance(add_outline, (int, np.int_))
                    or is_list_of_int(add_outline)
                    and len(add_outline) != len(x)
                ):
                    add_outline = np.isin(np.arange(len(x)), add_outline)
                    add_outline = np.array(add_outline, dtype=bool)
                if outline_width is None:
                    outline_width = (0.6, 0.3)
                if isinstance(add_outline, str):
                    if add_outline in adata.var.keys() and basis in adata.var_names:
                        add_outline = f"{adata[:, basis].var[add_outline][0]}"
                idx = groups_to_bool(adata, add_outline, color)
                if idx is not None and np.sum(idx) > 0:  # if anything to be outlined
                    zorder = 2 if zorder is None else zorder + 2
                    if kwargs["s"] is not None:
                        kwargs["s"] *= 1.2
                    # restore order of values
                    x, y = scatter_array[:, 0][idx], scatter_array[:, 1][idx]
                    c = color_array
                    if not isinstance(c, str) and len(c) == adata.n_obs:
                        c = c[idx]
                    if isinstance(kwargs["s"], np.ndarray):
                        kwargs["s"] = np.array(kwargs["s"])[idx]
                    if isinstance(c, np.ndarray) and not isinstance(c[0], str):
                        if "vmid" not in kwargs and "vmin" not in kwargs:
                            kwargs["vmin"] = np.min(color_array)
                        if "vmid" not in kwargs and "vmax" not in kwargs:
                            kwargs["vmax"] = np.max(color_array)
                    ax.scatter(
                        x, y, c=c, alpha=alpha, marker=".", zorder=zorder, **kwargs
                    )
                if idx is None or np.sum(idx) > 0:  # if all or anything to be outlined
                    plot_outline(
                        x, y, kwargs, outline_width, outline_color, zorder, ax=ax
                    )
                if idx is not None and np.sum(idx) == 0:  # if nothing to be outlined
                    add_linfit, add_polyfit, add_density = None, None, None

            # set legend if categorical categorical color vals
            if is_categorical(adata, color) and len(scatter_array) == adata.n_obs:
                legend_loc = default_legend_loc(adata, color, legend_loc)
                g_bool = groups_to_bool(adata, add_outline, color)
                if not (add_outline is None or g_bool is None):
                    groups = add_outline
                set_legend(
                    adata,
                    ax,
                    color,
                    legend_loc,
                    scatter_array,
                    legend_fontweight,
                    legend_fontsize,
                    legend_fontoutline,
                    groups,
                )
            if add_density:
                plot_density(x, y, add_density, ax=ax)

            if add_linfit:
                if add_linfit is True and basis in adata.var_names:
                    add_linfit = "no_intercept"  # without intercept
                plot_linfit(
                    x,
                    y,
                    add_linfit,
                    legend_loc != "none",
                    linecolor,
                    linewidth,
                    fontsize,
                    ax=ax,
                )

            if add_polyfit:
                if add_polyfit is True and basis in adata.var_names:
                    add_polyfit = "no_intercept"  # without intercept
                plot_polyfit(
                    x,
                    y,
                    add_polyfit,
                    legend_loc != "none",
                    linecolor,
                    linewidth,
                    fontsize,
                    ax=ax,
                )

            if add_rug:
                rug_color = add_rug if isinstance(add_rug, str) else color
                rug_color = np.ravel(interpret_colorkey(adata, rug_color))
                plot_rug(np.ravel(x), color=rug_color, ax=ax)

            if add_text:
                if add_text_pos is None:
                    add_text_pos = [0.05, 0.95]
                ax.text(
                    add_text_pos[0],
                    add_text_pos[1],
                    f"{add_text}",
                    ha="left",
                    va="top",
                    fontsize=fontsize,
                    transform=ax.transAxes,
                    bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.2),
                )

            set_label(xlabel, ylabel, fontsize, basis, ax=ax)
            set_title(title, layer, color, fontsize, ax=ax)
            update_axes(ax, xlim, ylim, fontsize, is_embedding, frameon, figsize)
            if colorbar is not False:
                if not isinstance(c, str) and not is_categorical(adata, color):
                    labelsize = fontsize * 0.75 if fontsize is not None else None
                    set_colorbar(smp, ax=ax, labelsize=labelsize)

            savefig_or_show(dpi=dpi, save=save, show=show)
            if show is False:
                return ax
def _wraps_plot_scatter(wrapper):
    """Decorator copying ``scatter``'s annotations onto *wrapper*.

    Annotations declared on *wrapper* itself take precedence over those
    inherited from ``scatter``. The wrapper's own ``adata``/``kwargs``
    entries and ``scatter``'s ``basis`` (fixed by the wrapper) are
    excluded from the merge. Also records ``scatter`` as the wrapped
    callable for introspection tools.
    """
    own = {
        name: tp
        for name, tp in wrapper.__annotations__.items()
        if name not in {"adata", "kwargs"}
    }
    inherited = {
        name: tp
        for name, tp in scatter.__annotations__.items()
        if name != "basis"
    }
    merged = dict(inherited)
    merged.update(own)
    wrapper.__annotations__ = merged
    wrapper.__wrapped__ = scatter
    return wrapper
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def trimap(adata, **kwargs):
    """\
    Plot cells in the TriMap embedding (``scatter`` with ``basis="trimap"``).

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    return scatter(adata, basis="trimap", **kwargs)
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def umap(adata, **kwargs):
    """\
    Plot cells in the UMAP embedding (``scatter`` with ``basis="umap"``).

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    return scatter(adata, basis="umap", **kwargs)
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def tsne(adata, **kwargs):
    """\
    Plot cells in the t-SNE embedding (``scatter`` with ``basis="tsne"``).

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    return scatter(adata, basis="tsne", **kwargs)
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def diffmap(adata, **kwargs):
    """\
    Plot cells in the diffusion-map embedding (``scatter`` with ``basis="diffmap"``).

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    return scatter(adata, basis="diffmap", **kwargs)
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def phate(adata, **kwargs):
    """\
    Plot cells in the PHATE embedding (``scatter`` with ``basis="phate"``).

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    return scatter(adata, basis="phate", **kwargs)
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def draw_graph(adata, layout=None, **kwargs):
    """\
    Plot cells in a force-directed graph layout.

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    # Default to the layout that was used when the embedding was computed.
    if layout is None:
        layout = f"{adata.uns['draw_graph']['params']['layout']}"
    basis = f"draw_graph_{layout}"
    # Fail early with a clear message if the embedding is missing.
    if f"X_{basis}" not in adata.obsm_keys():
        raise ValueError(f"Could not find draw_graph_{layout} in adata.obs.")
    return scatter(adata, basis=basis, **kwargs)
@_wraps_plot_scatter
@doc_params(scatter=doc_scatter)
def pca(adata, **kwargs):
    """\
    Plot cells in the PCA basis (``scatter`` with ``basis="pca"``).

    Parameters
    ----------
    {scatter}

    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    return scatter(adata, basis="pca", **kwargs)
| [
"inspect.signature",
"numpy.argsort",
"matplotlib.pyplot.GridSpec",
"numpy.array",
"numpy.nanmin",
"scvelo.plotting.docs.doc_params",
"numpy.where",
"numpy.zeros_like",
"numpy.max",
"numpy.stack",
"numpy.random.seed",
"numpy.nanmax",
"numpy.min",
"numpy.abs",
"numpy.ones",
"numpy.any",... | [((197, 228), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (207, 228), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((32111, 32142), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (32121, 32142), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((32439, 32470), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (32449, 32470), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((32761, 32792), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (32771, 32792), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((33083, 33114), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (33093, 33114), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((33414, 33445), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (33424, 33445), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((33739, 33770), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (33749, 33770), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((34333, 34364), 'scvelo.plotting.docs.doc_params', 'doc_params', ([], {'scatter': 'doc_scatter'}), '(scatter=doc_scatter)\n', (34343, 34364), False, 'from scvelo.plotting.docs import doc_scatter, doc_params\n'), ((2604, 2622), 'inspect.signature', 'signature', (['scatter'], {}), '(scatter)\n', (2613, 2622), False, 'from inspect import signature\n'), ((5624, 5662), 'matplotlib.pyplot.figure', 'pl.figure', (['None', 'grid_figsize'], {'dpi': 'dpi'}), '(None, grid_figsize, dpi=dpi)\n', (5633, 
5662), True, 'import matplotlib.pyplot as pl\n'), ((5730, 5790), 'matplotlib.pyplot.GridSpec', 'pl.GridSpec', (['nrows', 'ncols', 'fig'], {'hspace': 'hspace', 'wspace': 'wspace'}), '(nrows, ncols, fig, hspace=hspace, wspace=wspace)\n', (5741, 5790), True, 'import matplotlib.pyplot as pl\n'), ((3733, 3762), 'numpy.array', 'np.array', (['add_text'], {'dtype': 'str'}), '(add_text, dtype=str)\n', (3741, 3762), True, 'import numpy as np\n'), ((4302, 4321), 'numpy.max', 'np.max', (['key_lengths'], {}), '(key_lengths)\n', (4308, 4321), True, 'import numpy as np\n'), ((1850, 1866), 'numpy.stack', 'np.stack', (['[x, y]'], {}), '([x, y])\n', (1858, 1866), True, 'import numpy as np\n'), ((4454, 4477), 'numpy.sum', 'np.sum', (['(key_lengths > 1)'], {}), '(key_lengths > 1)\n', (4460, 4477), True, 'import numpy as np\n'), ((4266, 4291), 'numpy.where', 'np.where', (['(key_lengths > 1)'], {}), '(key_lengths > 1)\n', (4274, 4291), True, 'import numpy as np\n'), ((21608, 21619), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (21616, 21619), True, 'import numpy as np\n'), ((21621, 21632), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (21629, 21632), True, 'import numpy as np\n'), ((25695, 25712), 'numpy.zeros_like', 'np.zeros_like', (['vs'], {}), '(vs)\n', (25708, 25712), True, 'import numpy as np\n'), ((9794, 9813), 'numpy.argsort', 'np.argsort', (['vals', '(1)'], {}), '(vals, 1)\n', (9804, 9813), True, 'import numpy as np\n'), ((10279, 10334), 'numpy.array', 'np.array', (['[(id0 in c and id1 in c) for c in sorted_idx]'], {}), '([(id0 in c and id1 in c) for c in sorted_idx])\n', (10287, 10334), True, 'import numpy as np\n'), ((18063, 18082), 'numpy.ones', 'np.ones', (['n_convolve'], {}), '(n_convolve)\n', (18070, 18082), True, 'import numpy as np\n'), ((18114, 18127), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (18124, 18127), True, 'import numpy as np\n'), ((18607, 18625), 'numpy.ravel', 'np.ravel', (['x[color]'], {}), '(x[color])\n', (18615, 18625), True, 'import 
numpy as np\n'), ((18647, 18665), 'numpy.ravel', 'np.ravel', (['y[color]'], {}), '(y[color])\n', (18655, 18665), True, 'import numpy as np\n'), ((20766, 20823), 'numpy.any', 'np.any', (["[(v in kwargs) for v in ['vmin', 'vmid', 'vmax']]"], {}), "([(v in kwargs) for v in ['vmin', 'vmid', 'vmax']])\n", (20772, 20823), True, 'import numpy as np\n'), ((21802, 21813), 'numpy.ravel', 'np.ravel', (['c'], {}), '(c)\n', (21810, 21813), True, 'import numpy as np\n'), ((22224, 22240), 'numpy.stack', 'np.stack', (['[x, y]'], {}), '([x, y])\n', (22232, 22240), True, 'import numpy as np\n'), ((24615, 24628), 'numpy.argsort', 'np.argsort', (['c'], {}), '(c)\n', (24625, 24628), True, 'import numpy as np\n'), ((26771, 26804), 'numpy.array', 'np.array', (['add_outline'], {'dtype': 'bool'}), '(add_outline, dtype=bool)\n', (26779, 26804), True, 'import numpy as np\n'), ((30616, 30627), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (30624, 30627), True, 'import numpy as np\n'), ((6372, 6386), 'matplotlib.pyplot.subplot', 'pl.subplot', (['gs'], {}), '(gs)\n', (6382, 6386), True, 'import matplotlib.pyplot as pl\n'), ((10356, 10370), 'numpy.sum', 'np.sum', (['c_bool'], {}), '(c_bool)\n', (10362, 10370), True, 'import numpy as np\n'), ((18145, 18158), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (18155, 18158), True, 'import numpy as np\n'), ((18456, 18469), 'numpy.sum', 'np.sum', (['color'], {}), '(color)\n', (18462, 18469), True, 'import numpy as np\n'), ((22455, 22482), 'pandas.isnull', 'pd.isnull', (['adata.obs[color]'], {}), '(adata.obs[color])\n', (22464, 22482), True, 'import pandas as pd\n'), ((23574, 23585), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (23580, 23585), True, 'import numpy as np\n'), ((24779, 24796), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (24793, 24796), True, 'import numpy as np\n'), ((27218, 27229), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (27224, 27229), True, 'import numpy as np\n'), ((28344, 28355), 'numpy.sum', 
'np.sum', (['idx'], {}), '(idx)\n', (28350, 28355), True, 'import numpy as np\n'), ((28575, 28586), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (28581, 28586), True, 'import numpy as np\n'), ((10202, 10239), 'numpy.array', 'np.array', (['(vals[:, id1] - vals[:, id0])'], {}), '(vals[:, id1] - vals[:, id0])\n', (10210, 10239), True, 'import numpy as np\n'), ((20477, 20489), 'numpy.nanmin', 'np.nanmin', (['c'], {}), '(c)\n', (20486, 20489), True, 'import numpy as np\n'), ((20538, 20550), 'numpy.nanmax', 'np.nanmax', (['c'], {}), '(c)\n', (20547, 20550), True, 'import numpy as np\n'), ((21406, 21415), 'numpy.min', 'np.min', (['c'], {}), '(c)\n', (21412, 21415), True, 'import numpy as np\n'), ((21417, 21426), 'numpy.max', 'np.max', (['c'], {}), '(c)\n', (21423, 21426), True, 'import numpy as np\n'), ((21464, 21481), 'numpy.abs', 'np.abs', (['(vmid - lb)'], {}), '(vmid - lb)\n', (21470, 21481), True, 'import numpy as np\n'), ((21483, 21500), 'numpy.abs', 'np.abs', (['(ub - vmid)'], {}), '(ub - vmid)\n', (21489, 21500), True, 'import numpy as np\n'), ((21821, 21832), 'numpy.ravel', 'np.ravel', (['c'], {}), '(c)\n', (21829, 21832), True, 'import numpy as np\n'), ((25175, 25196), 'numpy.array', 'np.array', (["kwargs['s']"], {}), "(kwargs['s'])\n", (25183, 25196), True, 'import numpy as np\n'), ((27787, 27808), 'numpy.array', 'np.array', (["kwargs['s']"], {}), "(kwargs['s'])\n", (27795, 27808), True, 'import numpy as np\n'), ((28013, 28032), 'numpy.min', 'np.min', (['color_array'], {}), '(color_array)\n', (28019, 28032), True, 'import numpy as np\n'), ((28152, 28171), 'numpy.max', 'np.max', (['color_array'], {}), '(color_array)\n', (28158, 28171), True, 'import numpy as np\n'), ((10426, 10441), 'numpy.sum', 'np.sum', (['(~c_bool)'], {}), '(~c_bool)\n', (10432, 10441), True, 'import numpy as np\n'), ((23893, 23914), 'numpy.array', 'np.array', (["kwargs['s']"], {}), "(kwargs['s'])\n", (23901, 23914), True, 'import numpy as np\n'), ((24858, 24872), 'numpy.sum', 'np.sum', 
(['counts'], {}), '(counts)\n', (24864, 24872), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import random

# Demo: build a DataFrame of random scores, derive a new column, then
# contrast a reference (alias) with a true copy.
myColumns = ['Eleanor', 'Chidi', 'Tahani', 'Jason']
myData = np.random.randint(0, 101, size=(4, 4))  # 4x4 ints in [0, 100]
myDataFrame = pd.DataFrame(myData, columns=myColumns)
print(myDataFrame)
print(myDataFrame.loc[1, 'Eleanor'])
myDataFrame['Janet'] = myDataFrame['Tahani'] + myDataFrame['Jason']
print(myDataFrame)
referenceOfDataFrame = myDataFrame  # alias: both names point at the same object
print(referenceOfDataFrame)
copyOfDataFrame = myDataFrame.copy()  # true copy of the original dataframe
print(copyOfDataFrame) | [
"pandas.DataFrame",
"numpy.random.randint"
] | [((114, 161), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(101)', 'size': '(4, 4)'}), '(low=0, high=101, size=(4, 4))\n', (131, 161), True, 'import numpy as np\n'), ((175, 219), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'myData', 'columns': 'myColumns'}), '(data=myData, columns=myColumns)\n', (187, 219), True, 'import pandas as pd\n')] |
import os
import pandas as pd
import numpy as np
import flask
import pickle
import joblib
from flask import Flask, render_template, request
import requests
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route("/predict",methods = ["GET","POST"])
def result():
if request.method == "POST":
idd = int(request.form.get('idd'))
perc = float(request.form.get('perc'))
age = int(request.form.get('age'))
income = int(request.form.get('income'))
c1 = float(request.form.get('count1'))
c2 = float(request.form.get('count2'))
c3 = float(request.form.get('count3'))
auc = float(request.form.get('auc'))
num = int(request.form.get('num'))
s_channel = list(request.form.get('s_channel'))
residence = list(request.form.get('residence'))
age_group = list(request.form.get('age_group'))
status = list(request.form.get('status'))
prediction = [idd, perc, age, income, c1, c2, c3, auc, num] + s_channel + residence + age_group + status
prediction = np.array(prediction)
prediction = prediction.reshape(1, -1)
file = open("model.pkl","rb")
trained_model = joblib.load(file)
result = trained_model.predict(prediction)
return str(result[0])
if __name__ == "__main__":
app.run(debug=True) | [
"flask.render_template",
"flask.Flask",
"flask.request.form.get",
"numpy.array",
"joblib.load"
] | [((165, 180), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (170, 180), False, 'from flask import Flask, render_template, request\n'), ((243, 272), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (258, 272), False, 'from flask import Flask, render_template, request\n'), ((1054, 1074), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (1062, 1074), True, 'import numpy as np\n'), ((1173, 1190), 'joblib.load', 'joblib.load', (['file'], {}), '(file)\n', (1184, 1190), False, 'import joblib\n'), ((379, 402), 'flask.request.form.get', 'request.form.get', (['"""idd"""'], {}), "('idd')\n", (395, 402), False, 'from flask import Flask, render_template, request\n'), ((420, 444), 'flask.request.form.get', 'request.form.get', (['"""perc"""'], {}), "('perc')\n", (436, 444), False, 'from flask import Flask, render_template, request\n'), ((459, 482), 'flask.request.form.get', 'request.form.get', (['"""age"""'], {}), "('age')\n", (475, 482), False, 'from flask import Flask, render_template, request\n'), ((500, 526), 'flask.request.form.get', 'request.form.get', (['"""income"""'], {}), "('income')\n", (516, 526), False, 'from flask import Flask, render_template, request\n'), ((542, 568), 'flask.request.form.get', 'request.form.get', (['"""count1"""'], {}), "('count1')\n", (558, 568), False, 'from flask import Flask, render_template, request\n'), ((584, 610), 'flask.request.form.get', 'request.form.get', (['"""count2"""'], {}), "('count2')\n", (600, 610), False, 'from flask import Flask, render_template, request\n'), ((626, 652), 'flask.request.form.get', 'request.form.get', (['"""count3"""'], {}), "('count3')\n", (642, 652), False, 'from flask import Flask, render_template, request\n'), ((669, 692), 'flask.request.form.get', 'request.form.get', (['"""auc"""'], {}), "('auc')\n", (685, 692), False, 'from flask import Flask, render_template, request\n'), ((707, 730), 'flask.request.form.get', 
'request.form.get', (['"""num"""'], {}), "('num')\n", (723, 730), False, 'from flask import Flask, render_template, request\n'), ((752, 781), 'flask.request.form.get', 'request.form.get', (['"""s_channel"""'], {}), "('s_channel')\n", (768, 781), False, 'from flask import Flask, render_template, request\n'), ((803, 832), 'flask.request.form.get', 'request.form.get', (['"""residence"""'], {}), "('residence')\n", (819, 832), False, 'from flask import Flask, render_template, request\n'), ((854, 883), 'flask.request.form.get', 'request.form.get', (['"""age_group"""'], {}), "('age_group')\n", (870, 883), False, 'from flask import Flask, render_template, request\n'), ((902, 928), 'flask.request.form.get', 'request.form.get', (['"""status"""'], {}), "('status')\n", (918, 928), False, 'from flask import Flask, render_template, request\n')] |
import numpy as np
import math
try:
import Image
except ImportError:
from PIL import Image as Image
import sys
from scipy import interpolate
import geopy.distance
try:
from pylab import figure, cm
except ImportError:
from matplotlib.pylab import figure, cm
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
def geoplot(latitudes, longitudes, counts):
    """Interpolate per-location counts over the globe and show a lat/lon heat map.

    Parameters
    ----------
    latitudes, longitudes : sequences of float
        Coordinates in degrees.
    counts : sequence of float
        One value per coordinate pair; interpolated over the sphere.
    """
    def xyz(lat, lng):
        # Convert degrees lat/lon to a point on the unit sphere.
        # Bug fix: the original ignored its own ``lat``/``lng`` parameters and
        # read the enclosing loop variables ``latitude``/``longitude`` instead,
        # which only worked by coincidence of variable naming.
        phi, theta = np.radians(lat), np.radians(lng)
        rho = 1
        x = math.cos(phi) * math.cos(theta) * rho
        y = math.cos(phi) * math.sin(theta) * rho
        z = math.sin(phi) * rho
        return x, y, z

    # Bug fix: the original rebound ``counts = []`` here, clearing the input
    # argument so that reading ``counts[i]`` below raised IndexError for any
    # non-empty input.  The parameter is now used directly.
    xs, ys, zs = [], [], []
    for lat, lng in zip(latitudes, longitudes):
        x, y, z = xyz(lat, lng)
        xs.append(x)
        ys.append(y)
        zs.append(z)

    def geodist(p1, p2):
        # Great-circle distance between two unit-sphere points.
        # Currently unused; kept as the natural metric for this embedding.
        ed = np.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2 + (p2[2] - p1[2]) ** 2)
        return 2 * np.arcsin(ed / 2)

    interpolation_model = interpolate.Rbf(xs, ys, zs, counts, function='thin_plate')
    themap = np.zeros((180, 360))
    for latitude in range(-89, 91):
        for longitude in range(-180, 180):
            x, y, z = xyz(latitude, longitude)
            themap[90 - latitude][longitude - 180] = interpolation_model(x, y, z)
    themap = themap[10:170]  # trim the polar rows before display
    plt.imshow(themap)
    plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.radians",
"numpy.sqrt",
"numpy.arcsin",
"math.cos",
"numpy.zeros",
"math.sin",
"scipy.interpolate.Rbf",
"matplotlib.pyplot.show"
] | [((1007, 1065), 'scipy.interpolate.Rbf', 'interpolate.Rbf', (['xs', 'ys', 'zs', 'counts'], {'function': '"""thin_plate"""'}), "(xs, ys, zs, counts, function='thin_plate')\n", (1022, 1065), False, 'from scipy import interpolate\n'), ((1077, 1097), 'numpy.zeros', 'np.zeros', (['(180, 360)'], {}), '((180, 360))\n', (1085, 1097), True, 'import numpy as np\n'), ((1298, 1316), 'matplotlib.pyplot.imshow', 'plt.imshow', (['themap'], {}), '(themap)\n', (1308, 1316), True, 'from matplotlib import pyplot as plt\n'), ((1318, 1328), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1326, 1328), True, 'from matplotlib import pyplot as plt\n'), ((880, 955), 'numpy.sqrt', 'np.sqrt', (['((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2 + (p2[2] - p1[2]) ** 2)'], {}), '((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2 + (p2[2] - p1[2]) ** 2)\n', (887, 955), True, 'import numpy as np\n'), ((415, 435), 'numpy.radians', 'np.radians', (['latitude'], {}), '(latitude)\n', (425, 435), True, 'import numpy as np\n'), ((437, 458), 'numpy.radians', 'np.radians', (['longitude'], {}), '(longitude)\n', (447, 458), True, 'import numpy as np\n'), ((563, 576), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (571, 576), False, 'import math\n'), ((967, 984), 'numpy.arcsin', 'np.arcsin', (['(ed / 2)'], {}), '(ed / 2)\n', (976, 984), True, 'import numpy as np\n'), ((475, 488), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (483, 488), False, 'import math\n'), ((491, 506), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (499, 506), False, 'import math\n'), ((519, 532), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (527, 532), False, 'import math\n'), ((535, 550), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (543, 550), False, 'import math\n')] |
#-*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import sonnet as snt
from model.DAM_test import dam
from model.DNC import dnc
from loader import BAbITestBatchGenerator, BAbIData
# Command-line flags (tf.flags): hyper-parameters and checkpoint selection
# shared by the model-building and evaluation code below.
FLAGS = tf.flags.FLAGS
# Model parameters
tf.flags.DEFINE_integer("embedding_size", 64, "Size of embedding.")
tf.flags.DEFINE_integer("hidden_size", 256, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("memory_address_size", 128, "The number of memory slots.")
tf.flags.DEFINE_integer("memory_length_size", 48, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_write_heads", 1, "Number of memory write heads.")
tf.flags.DEFINE_integer("num_read_heads", 4, "Number of memory read heads.")
tf.flags.DEFINE_float("keep_prob", 1.0, "Keep probability for bypass dropout")
tf.flags.DEFINE_integer("num_memory_blocks", 2, "Number of memory blocks.")
# Model selection.
tf.flags.DEFINE_boolean("dam", True, "Whether dam or not.")
# Testing options.
tf.flags.DEFINE_integer("batch_size", 200, "Batch size for training.")
tf.flags.DEFINE_string("name", "model", "Name of training model.")
tf.flags.DEFINE_integer("num", 97600, "Number of training iterations for Test.")
def run_model(input_data, sequence_length, output_size):
    """Unroll the recurrent core over ``input_data`` and return its outputs.

    Builds either a DAM or a DNC core (per ``FLAGS.dam``) from the flag
    values and runs it with ``tf.nn.dynamic_rnn`` (batch-major input).
    """
    memory_cfg = {
        "memory_size": FLAGS.memory_address_size,
        "word_size": FLAGS.memory_length_size,
        "num_reads": FLAGS.num_read_heads,
        "num_writes": FLAGS.num_write_heads,
    }
    controller_cfg = {"hidden_size": FLAGS.hidden_size}
    extra_cfg = {
        "keep_prob": FLAGS.keep_prob,
        "num_memory_block": FLAGS.num_memory_blocks,
    }
    if FLAGS.dam:
        rnn_core = dam.DAM(memory_cfg, controller_cfg, extra_cfg, output_size)
    else:
        rnn_core = dnc.DNC(memory_cfg, controller_cfg, output_size)
    n_examples = tf.shape(input_data)[0]
    outputs, _ = tf.nn.dynamic_rnn(
        cell=rnn_core,
        inputs=input_data,
        sequence_length=sequence_length,
        time_major=False,
        initial_state=rnn_core.initial_state(n_examples),
    )
    return outputs
def test():
    """Evaluates a restored checkpoint on every bAbI task and prints error rates.

    Restores ``model.ckpt-FLAGS.num`` from ``info/FLAGS.name/checkpoint``,
    runs each task file found in the test data directory, grades every
    question (a question counts as correct only if all of its answer slots
    are correct), and prints a per-task summary plus the mean error rate and
    the number of failed tasks (err. > 5%).
    """
    test_data = BAbITestBatchGenerator()
    dataset = BAbIData(None, test_data.input_size, test_data.output_size, FLAGS.embedding_size)
    output_logits = run_model(dataset.processed_input_data, dataset.sequence_length, test_data.output_size)
    softmaxed = tf.nn.softmax(output_logits)

    saver = tf.train.Saver()

    # Evaluate.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        saver.restore(sess, os.path.join('info', FLAGS.name, 'checkpoint', 'model.ckpt-' + str(FLAGS.num)))
        tasks_results = {}
        tasks_names = {}
        for t in os.listdir(test_data.test_data_dir):
            task_number, task_name, test_size = test_data.feed_data(t)
            tasks_names[task_number] = task_name
            results = []
            test_data.feed_batch_size(FLAGS.batch_size)
            for idx in range(int(test_size / FLAGS.batch_size) + 1):
                # The final iteration handles the remainder batch (if any).
                if idx == int(test_size / FLAGS.batch_size):
                    if test_size % FLAGS.batch_size == 0:
                        break
                    test_data.feed_batch_size(test_size % FLAGS.batch_size)
                i_d, s_l, questions_indecies, target_mask, desired_answers = next(test_data)
                softmax_output = sess.run([softmaxed], feed_dict={
                    dataset.input_data: i_d,
                    dataset.sequence_length: s_l,
                })
                softmax_output = np.squeeze(softmax_output, axis=0)
                for astory, s_o, q_i, t_m, d_a in zip(i_d, softmax_output, questions_indecies, target_mask, desired_answers):
                    given_answers = np.argmax(s_o[t_m], axis=1)
                    answers_cursor = 0
                    for question_indx in q_i:
                        # Collect per-slot grades for the answer positions
                        # (marked with target_code) that follow the question.
                        question_grade = []
                        targets_cursor = question_indx + 1
                        while targets_cursor < len(astory) and astory[targets_cursor] == test_data.target_code:
                            question_grade.append(given_answers[answers_cursor] == d_a[answers_cursor])
                            answers_cursor += 1
                            targets_cursor += 1
                        # A question is correct only if every slot matched.
                        results.append(np.prod(question_grade))
            error_rate = 1. - np.mean(results)
            tasks_results[task_number] = error_rate
            print("\r%s ... %.3f%% Error Rate.\n" % (task_name, error_rate * 100))
        print("\n")
        print("%-27s%s" % ("Task", "Result"))
        print("-----------------------------------")
        for k in range(20):
            task_id = str(k + 1)
            task_result = "%.2f%%" % (tasks_results[task_id] * 100)
            print("%-27s%s" % (tasks_names[task_id], task_result))
        print("-----------------------------------")
        # Bug fix: ``dict.iteritems()`` is Python-2-only and crashes under
        # Python 3; ``values()`` works on both and is what was meant.
        all_tasks_results = list(tasks_results.values())
        results_mean = "%.2f%%" % (np.mean(all_tasks_results) * 100)
        failed_count = "%d" % (np.sum(np.array(all_tasks_results) > 0.05))
        print("%-27s%s" % ("Mean Err.", results_mean))
        print("%-27s%s" % ("Failed (err. > 5%)", failed_count))
def main(unused_argv):
    """Entry point invoked by ``tf.app.run``: enable logging, then evaluate."""
    tf.logging.set_verbosity(3)  # Print INFO log messages.
    test()

if __name__ == "__main__":
    tf.app.run()
| [
"numpy.prod",
"tensorflow.shape",
"loader.BAbITestBatchGenerator",
"tensorflow.logging.set_verbosity",
"numpy.array",
"tensorflow.nn.softmax",
"model.DAM_test.dam.DAM",
"tensorflow.app.run",
"model.DNC.dnc.DNC",
"tensorflow.flags.DEFINE_string",
"numpy.mean",
"os.listdir",
"tensorflow.flags.... | [((362, 429), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""embedding_size"""', '(64)', '"""Size of embedding."""'], {}), "('embedding_size', 64, 'Size of embedding.')\n", (385, 429), True, 'import tensorflow as tf\n'), ((430, 503), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""hidden_size"""', '(256)', '"""Size of LSTM hidden layer."""'], {}), "('hidden_size', 256, 'Size of LSTM hidden layer.')\n", (453, 503), True, 'import tensorflow as tf\n'), ((504, 590), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""memory_address_size"""', '(128)', '"""The number of memory slots."""'], {}), "('memory_address_size', 128,\n 'The number of memory slots.')\n", (527, 590), True, 'import tensorflow as tf\n'), ((587, 674), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""memory_length_size"""', '(48)', '"""The width of each memory slot."""'], {}), "('memory_length_size', 48,\n 'The width of each memory slot.')\n", (610, 674), True, 'import tensorflow as tf\n'), ((671, 749), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_write_heads"""', '(1)', '"""Number of memory write heads."""'], {}), "('num_write_heads', 1, 'Number of memory write heads.')\n", (694, 749), True, 'import tensorflow as tf\n'), ((750, 826), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_read_heads"""', '(4)', '"""Number of memory read heads."""'], {}), "('num_read_heads', 4, 'Number of memory read heads.')\n", (773, 826), True, 'import tensorflow as tf\n'), ((827, 905), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""keep_prob"""', '(1.0)', '"""Keep probability for bypass dropout"""'], {}), "('keep_prob', 1.0, 'Keep probability for bypass dropout')\n", (848, 905), True, 'import tensorflow as tf\n'), ((906, 981), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_memory_blocks"""', '(2)', '"""Number of memory blocks."""'], 
{}), "('num_memory_blocks', 2, 'Number of memory blocks.')\n", (929, 981), True, 'import tensorflow as tf\n'), ((1002, 1061), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""dam"""', '(True)', '"""Whether dam or not."""'], {}), "('dam', True, 'Whether dam or not.')\n", (1025, 1061), True, 'import tensorflow as tf\n'), ((1082, 1152), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(200)', '"""Batch size for training."""'], {}), "('batch_size', 200, 'Batch size for training.')\n", (1105, 1152), True, 'import tensorflow as tf\n'), ((1153, 1219), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""name"""', '"""model"""', '"""Name of training model."""'], {}), "('name', 'model', 'Name of training model.')\n", (1175, 1219), True, 'import tensorflow as tf\n'), ((1220, 1305), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num"""', '(97600)', '"""Number of training iterations for Test."""'], {}), "('num', 97600, 'Number of training iterations for Test.'\n )\n", (1243, 1305), True, 'import tensorflow as tf\n'), ((2107, 2239), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'core', 'inputs': 'input_data', 'sequence_length': 'sequence_length', 'time_major': '(False)', 'initial_state': 'initial_state'}), '(cell=core, inputs=input_data, sequence_length=\n sequence_length, time_major=False, initial_state=initial_state)\n', (2124, 2239), True, 'import tensorflow as tf\n'), ((2395, 2419), 'loader.BAbITestBatchGenerator', 'BAbITestBatchGenerator', ([], {}), '()\n', (2417, 2419), False, 'from loader import BAbITestBatchGenerator, BAbIData\n'), ((2434, 2520), 'loader.BAbIData', 'BAbIData', (['None', 'test_data.input_size', 'test_data.output_size', 'FLAGS.embedding_size'], {}), '(None, test_data.input_size, test_data.output_size, FLAGS.\n embedding_size)\n', (2442, 2520), False, 'from loader import BAbITestBatchGenerator, BAbIData\n'), ((2641, 2669), 'tensorflow.nn.softmax', 
'tf.nn.softmax', (['output_logits'], {}), '(output_logits)\n', (2654, 2669), True, 'import tensorflow as tf\n'), ((2683, 2699), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2697, 2699), True, 'import tensorflow as tf\n'), ((2727, 2743), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2741, 2743), True, 'import tensorflow as tf\n'), ((5615, 5642), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['(3)'], {}), '(3)\n', (5639, 5642), True, 'import tensorflow as tf\n'), ((5715, 5727), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5725, 5727), True, 'import tensorflow as tf\n'), ((1839, 1907), 'model.DAM_test.dam.DAM', 'dam.DAM', (['access_config', 'controller_config', 'other_config', 'output_size'], {}), '(access_config, controller_config, other_config, output_size)\n', (1846, 1907), False, 'from model.DAM_test import dam\n'), ((1933, 1987), 'model.DNC.dnc.DNC', 'dnc.DNC', (['access_config', 'controller_config', 'output_size'], {}), '(access_config, controller_config, output_size)\n', (1940, 1987), False, 'from model.DNC import dnc\n'), ((2006, 2026), 'tensorflow.shape', 'tf.shape', (['input_data'], {}), '(input_data)\n', (2014, 2026), True, 'import tensorflow as tf\n'), ((2796, 2821), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2806, 2821), True, 'import tensorflow as tf\n'), ((3011, 3046), 'os.listdir', 'os.listdir', (['test_data.test_data_dir'], {}), '(test_data.test_data_dir)\n', (3021, 3046), False, 'import os\n'), ((3877, 3911), 'numpy.squeeze', 'np.squeeze', (['softmax_output'], {'axis': '(0)'}), '(softmax_output, axis=0)\n', (3887, 3911), True, 'import numpy as np\n'), ((4731, 4747), 'numpy.mean', 'np.mean', (['results'], {}), '(results)\n', (4738, 4747), True, 'import numpy as np\n'), ((5357, 5383), 'numpy.mean', 'np.mean', (['all_tasks_results'], {}), '(all_tasks_results)\n', (5364, 5383), True, 'import numpy as np\n'), ((4074, 4101), 'numpy.argmax', 
'np.argmax', (['s_o[t_m]'], {'axis': '(1)'}), '(s_o[t_m], axis=1)\n', (4083, 4101), True, 'import numpy as np\n'), ((5429, 5456), 'numpy.array', 'np.array', (['all_tasks_results'], {}), '(all_tasks_results)\n', (5437, 5456), True, 'import numpy as np\n'), ((4642, 4665), 'numpy.prod', 'np.prod', (['question_grade'], {}), '(question_grade)\n', (4649, 4665), True, 'import numpy as np\n')] |
""" Defines ArrayPlotData.
"""
import six
import six.moves as sm
from numpy import array, ndarray
# Enthought library imports
from traits.api import Dict
# Local, relative imports
from .abstract_plot_data import AbstractPlotData
from .abstract_data_source import AbstractDataSource
class ArrayPlotData(AbstractPlotData):
    """ Plot-data container backed by a name -> Numpy-array mapping.

    Arrays may be handed in positionally (they receive generated names of
    the form ``seriesN``) or as keyword arguments.  By default, downstream
    Chaco components and interactors are allowed to write back into this
    object.
    """

    #-------------------------------------------------------------------------
    # Public traits
    #-------------------------------------------------------------------------

    #: Map of names to arrays.  There is no restriction on array
    #: dimensionality, but each entry should describe a single plot item,
    #: so a multi-dimensional value only makes sense for consumers (image
    #: plots, etc.) that accept multi-dimensional input.
    arrays = Dict

    #: Consumers can write data to this object (overrides AbstractPlotData).
    writable = True

    def __init__(self, *data, **kw):
        """ Build the instance from positional and/or keyword arrays.

        Positional arrays are stored under generated names ``series1``,
        ``series2``, ... according to their position in the argument list;
        keyword arguments store arrays under explicit names.  For example::

            ArrayPlotData(array1, array2, index=array3, foo=array4)

        creates the entries 'series1', 'series2', 'index' and 'foo'.  If a
        keyword name collides with a generated positional name, the
        positional array wins (it is stored second).

        Note that, for convenience, keyword traits are *not* set from the
        keyword parameters here -- every keyword argument is treated as an
        array to store.
        """
        super(AbstractPlotData, self).__init__()
        self._update_data(kw)
        positional = dict(sm.zip(self._generate_names(len(data)), data))
        self._update_data(positional)

    #------------------------------------------------------------------------
    # AbstractPlotData Interface
    #------------------------------------------------------------------------

    def list_data(self):
        """ Returns the names of all arrays managed by this instance. """
        return list(self.arrays)

    def get_data(self, name):
        """ Returns the array stored under *name*, or None if absent.

        Implements AbstractDataSource.
        """
        return self.arrays.get(name)

    def del_data(self, name):
        """ Removes the array stored under *name*.

        Raises a KeyError when no such array exists.  Does nothing when the
        instance is not writable.
        """
        if not self.writable:
            return None

        if name not in self.arrays:
            raise KeyError("Data series '%s' does not exist." % name)
        del self.arrays[name]
        self.data_changed = {'removed': [name]}

    def set_data(self, name, new_data, generate_name=False):
        """ Stores *new_data* under *name* (or under a generated name).

        Does nothing when the instance is not writable.

        Parameters
        ----------
        name : string
            The name to store the array under (ignored if *generate_name*
            is True).
        new_data : array
            The array to store.
        generate_name : Boolean
            If True, a fresh unique name of the form 'seriesN' is generated
            (N one greater than the largest N already in use) and used in
            place of *name*.

        Returns
        -------
        The name the array was stored under.

        See Also
        --------
        update_data: sets several entries at once, which avoids transiently
            inconsistent states when array dimensions change.
        """
        if not self.writable:
            return None

        if generate_name:
            name = self._generate_names(1)[0]
        self.update_data({name: new_data})
        return name

    def update_data(self, *args, **kwargs):
        """ Updates several arrays at once, then fires one `data_changed`.

        Accepts the same signature as the dictionary ``update()`` method.
        Useful when new arrays change dimensions and setting them one at a
        time would break an existing Plot.

        Does nothing when the instance is not writable.  Implements
        AbstractPlotData's update_data() method.

        See Also
        --------
        set_data: simpler interface for a single entry.
        """
        if not self.writable:
            return None

        new_arrays = dict(*args, **kwargs)
        event = {}
        for key in new_arrays:
            bucket = 'changed' if key in self.arrays else 'added'
            event.setdefault(bucket, []).append(key)
        self._update_data(new_arrays)
        self.data_changed = event

    def set_selection(self, name, selection):
        """ Overrides AbstractPlotData to do nothing and not raise an error.
        """
        pass

    #------------------------------------------------------------------------
    # Private methods
    #------------------------------------------------------------------------

    def _generate_names(self, count):
        """ Returns *count* fresh 'seriesN' names, numbered past any in use.
        """
        start = max(self._generate_indices()) + 1
        return ["series{0:d}".format(i) for i in range(start, start + count)]

    def _generate_indices(self):
        """ Yields every integer N for which a 'seriesN' key exists (and 0).
        """
        yield 0  # default minimum
        for name in self.list_data():
            if name.startswith('series'):
                try:
                    yield int(name[6:])
                except ValueError:
                    continue

    def _update_data(self, data):
        """ Merges *data* into self.arrays, coercing plain values to arrays.
        """
        # Mutating ``data`` in place is fine: every caller builds the
        # dictionary it passes in.
        for key, value in list(data.items()):
            if not isinstance(value, (ndarray, AbstractDataSource)):
                data[key] = array(value)
        self.arrays.update(data)
| [
"numpy.array"
] | [((7048, 7060), 'numpy.array', 'array', (['value'], {}), '(value)\n', (7053, 7060), False, 'from numpy import array, ndarray\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Find optimal scale for quantization by minimizing KL-divergence"""
import ctypes
import numpy as np
from . import _ffi_api
def _find_scale_by_kl(arr, quantized_dtype='int8',
                      num_bins=8001, num_quantized_bins=255):
    """Given a tensor, find the optimal threshold for quantizing it.
    The reference distribution is `q`, and the candidate distribution is `p`.
    `q` is a truncated version of the original distribution.
    Ref:
    http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
    """
    assert isinstance(arr, np.ndarray)

    # The histogram is centered on zero and spans the largest magnitude
    # observed in the tensor.
    lo = np.min(arr)
    hi = np.max(arr)
    thres = max(abs(lo), abs(hi))

    # Non-negative tensors quantized to uint8 effectively double the number
    # of usable bins on the positive side.
    if lo >= 0 and quantized_dtype in ['uint8']:
        num_quantized_bins = 2 * num_quantized_bins + 1

    def as_void_ptr(ndarr, elem_type):
        # Expose the array's underlying buffer as an untyped C pointer for
        # the FFI call.
        typed_ptr = ndarr.ctypes.data_as(ctypes.POINTER(elem_type))
        return ctypes.cast(typed_ptr, ctypes.c_void_p)

    hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-thres, thres))
    return _ffi_api.FindScaleByKLMinimization(
        as_void_ptr(hist.astype(np.int32), ctypes.c_int),
        as_void_ptr(hist_edges, ctypes.c_float),
        num_bins,
        num_quantized_bins,
    )
| [
"numpy.histogram",
"ctypes.POINTER",
"numpy.max",
"ctypes.cast",
"numpy.min"
] | [((1406, 1417), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (1412, 1417), True, 'import numpy as np\n'), ((1432, 1443), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (1438, 1443), True, 'import numpy as np\n'), ((1851, 1906), 'numpy.histogram', 'np.histogram', (['arr'], {'bins': 'num_bins', 'range': '(-thres, thres)'}), '(arr, bins=num_bins, range=(-thres, thres))\n', (1863, 1906), True, 'import numpy as np\n'), ((1793, 1826), 'ctypes.cast', 'ctypes.cast', (['ptr', 'ctypes.c_void_p'], {}), '(ptr, ctypes.c_void_p)\n', (1804, 1826), False, 'import ctypes\n'), ((1749, 1776), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes_type'], {}), '(ctypes_type)\n', (1763, 1776), False, 'import ctypes\n')] |
#!/usr/bin/env python3
from enum import Enum
from typing import Any, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from captum.log import log_usage
from ..._utils.common import (
ExpansionTypes,
_expand_additional_forward_args,
_expand_target,
_format_additional_forward_args,
_format_input,
_format_tensor_into_tuples,
_is_tuple,
)
from .._utils.attribution import Attribution
from .._utils.common import (
_format_attributions,
_format_baseline,
_validate_input,
_validate_noise_tunnel_type,
)
class NoiseTunnelType(Enum):
    """Aggregation strategies supported by NoiseTunnel."""

    smoothgrad = 1
    smoothgrad_sq = 2
    vargrad = 3


# Member names in definition order; used to validate the `nt_type` argument.
SUPPORTED_NOISE_TUNNEL_TYPES = [member.name for member in NoiseTunnelType]
class NoiseTunnel(Attribution):
    r"""
    Adds gaussian noise to each input in the batch `n_samples` times
    and applies the given attribution algorithm to each of the samples.
    The attributions of the samples are combined based on the given noise
    tunnel type (nt_type):
    If nt_type is `smoothgrad`, the mean of the sampled attributions is
    returned. This approximates smoothing the given attribution method
    with a Gaussian Kernel.
    If nt_type is `smoothgrad_sq`, the mean of the squared sample attributions
    is returned.
    If nt_type is `vargrad`, the variance of the sample attributions is
    returned.
    More details about adding noise can be found in the following papers:
    https://arxiv.org/abs/1810.03292
    https://arxiv.org/abs/1810.03307
    https://arxiv.org/abs/1706.03825
    https://arxiv.org/pdf/1806.10758
    This method currently also supports batches of multiple examples input,
    however it can be computationally expensive depending on the model,
    the dimensionality of the data and execution environment.
    It is assumed that the batch size is the first dimension of input tensors.
    """

    def __init__(self, attribution_method: Attribution) -> None:
        r"""
        Args:
            attribution_method (Attribution): An instance of any attribution algorithm
                        of type `Attribution`. E.g. Integrated Gradients,
                        Conductance or Saliency.
        """
        self.attribution_method = attribution_method
        # Remember whether the wrapped method can report an approximation
        # error (convergence delta), so attribute() knows how to unpack its
        # return value.
        self.is_delta_supported = self.attribution_method.has_convergence_delta()
        Attribution.__init__(self, self.attribution_method.forward_func)

    @log_usage()
    def attribute(
        self,
        inputs: Union[Tensor, Tuple[Tensor, ...]],
        nt_type: str = "smoothgrad",
        n_samples: int = 5,
        stdevs: Union[float, Tuple[float, ...]] = 1.0,
        draw_baseline_from_distrib: bool = False,
        **kwargs: Any,
    ):
        r"""
        Args:
            inputs (tensor or tuple of tensors):  Input for which integrated
                        gradients are computed. If forward_func takes a single
                        tensor as input, a single input tensor should be provided.
                        If forward_func takes multiple tensors as input, a tuple
                        of the input tensors should be provided. It is assumed
                        that for all given input tensors, dimension 0 corresponds
                        to the number of examples, and if multiple input tensors
                        are provided, the examples must be aligned appropriately.
            nt_type (string, optional): Smoothing type of the attributions.
                        `smoothgrad`, `smoothgrad_sq` or `vargrad`
                        Default: `smoothgrad` if `type` is not provided.
            n_samples (int, optional):  The number of randomly generated examples
                        per sample in the input batch. Random examples are
                        generated by adding gaussian random noise to each sample.
                        Default: `5` if `n_samples` is not provided.
            stdevs    (float, or a tuple of floats optional): The standard deviation
                        of gaussian noise with zero mean that is added to each
                        input in the batch. If `stdevs` is a single float value
                        then that same value is used for all inputs. If it is
                        a tuple, then it must have the same length as the inputs
                        tuple. In this case, each stdev value in the stdevs tuple
                        corresponds to the input with the same index in the inputs
                        tuple.
                        Default: `1.0` if `stdevs` is not provided.
            draw_baseline_from_distrib (bool, optional): Indicates whether to
                        randomly draw baseline samples from the `baselines`
                        distribution provided as an input tensor.
                        Default: False
            **kwargs (Any, optional): Contains a list of arguments that are passed
                        to `attribution_method` attribution algorithm.
                        Any additional arguments that should be used for the
                        chosen attribution method should be included here.
                        For instance, such arguments include
                        `additional_forward_args` and `baselines`.
        Returns:
            **attributions** or 2-element tuple of **attributions**, **delta**:
            - **attributions** (*tensor* or tuple of *tensors*):
                        Attribution with
                        respect to each input feature. attributions will always be
                        the same size as the provided inputs, with each value
                        providing the attribution of the corresponding input index.
                        If a single tensor is provided as inputs, a single tensor is
                        returned. If a tuple is provided for inputs, a tuple of
                        corresponding sized tensors is returned.
            - **delta** (*float*, returned if return_convergence_delta=True):
                        Approximation error computed by the
                        attribution algorithm. Not all attribution algorithms
                        return delta value. It is computed only for some
                        algorithms, e.g. integrated gradients.
                        Delta is computed for each input in the batch
                        and represents the arithmetic mean
                        across all `n_sample` perturbed tensors for that input.
        Examples::
                >>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
                >>> # and returns an Nx10 tensor of class probabilities.
                >>> net = ImageClassifier()
                >>> ig = IntegratedGradients(net)
                >>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
                >>> # Creates noise tunnel
                >>> nt = NoiseTunnel(ig)
                >>> # Generates 10 perturbed input tensors per image.
                >>> # Computes integrated gradients for class 3 for each generated
                >>> # input and averages attributions accros all 10
                >>> # perturbed inputs per image
                >>> attribution = nt.attribute(input, nt_type='smoothgrad',
                >>>                            n_samples=10, target=3)
        """

        def add_noise_to_inputs() -> Tuple[Tensor, ...]:
            # Normalize ``stdevs`` to one value per input tensor, then draw
            # ``n_samples`` noisy copies of every input.
            if isinstance(stdevs, tuple):
                assert len(stdevs) == len(inputs), (
                    "The number of input tensors "
                    "in {} must be equal to the number of stdevs values {}".format(
                        len(inputs), len(stdevs)
                    )
                )
                # Bugfix: ``stdevs_`` was previously left unassigned on this
                # branch, so passing a tuple of stdevs raised a NameError in
                # the ``zip`` below instead of being used per input.
                stdevs_ = stdevs
            else:
                assert isinstance(
                    stdevs, float
                ), "stdevs must be type float. " "Given: {}".format(type(stdevs))
                stdevs_ = (stdevs,) * len(inputs)
            return tuple(
                add_noise_to_input(input, stdev)
                for (input, stdev) in zip(inputs, stdevs_)
            )

        def add_noise_to_input(input: Tensor, stdev: float) -> Tensor:
            # batch size
            bsz = input.shape[0]

            # expand input size by the number of drawn samples
            input_expanded_size = (bsz * n_samples,) + input.shape[1:]

            # expand stdev for the shape of the input and number of drawn samples
            stdev_expanded = torch.tensor(stdev, device=input.device).repeat(
                input_expanded_size
            )

            # draws `np.prod(input_expanded_size)` samples from normal distribution
            # with given input parametrization
            # FIXME it look like it is very difficult to make torch.normal
            # deterministic this needs an investigation
            noise = torch.normal(0, stdev_expanded)
            return input.repeat_interleave(n_samples, dim=0) + noise

        def expand_and_update_baselines():
            # NOTE: this helper (and the two below) mutate ``kwargs`` in
            # place so the expanded values reach the wrapped attribution call.
            def get_random_baseline_indices(bsz, baseline):
                num_ref_samples = baseline.shape[0]
                return np.random.choice(num_ref_samples, n_samples * bsz).tolist()

            # TODO allow to add noise to baselines as well
            # expand baselines to match the sizes of input
            if "baselines" not in kwargs:
                return
            baselines = kwargs["baselines"]
            baselines = _format_baseline(baselines, inputs)
            _validate_input(
                inputs, baselines, draw_baseline_from_distrib=draw_baseline_from_distrib
            )

            if draw_baseline_from_distrib:
                bsz = inputs[0].shape[0]
                baselines = tuple(
                    baseline[get_random_baseline_indices(bsz, baseline)]
                    if isinstance(baseline, torch.Tensor)
                    else baseline
                    for baseline in baselines
                )
            else:
                baselines = tuple(
                    baseline.repeat_interleave(n_samples, dim=0)
                    if isinstance(baseline, torch.Tensor)
                    and baseline.shape[0] == input.shape[0]
                    and baseline.shape[0] > 1
                    else baseline
                    for input, baseline in zip(inputs, baselines)
                )
            # update kwargs with expanded baseline
            kwargs["baselines"] = baselines

        def expand_and_update_additional_forward_args():
            if "additional_forward_args" not in kwargs:
                return
            additional_forward_args = kwargs["additional_forward_args"]
            additional_forward_args = _format_additional_forward_args(
                additional_forward_args
            )
            if additional_forward_args is None:
                return
            additional_forward_args = _expand_additional_forward_args(
                additional_forward_args,
                n_samples,
                expansion_type=ExpansionTypes.repeat_interleave,
            )
            # update kwargs with expanded baseline
            kwargs["additional_forward_args"] = additional_forward_args

        def expand_and_update_target():
            if "target" not in kwargs:
                return
            target = kwargs["target"]
            target = _expand_target(
                target, n_samples, expansion_type=ExpansionTypes.repeat_interleave
            )
            # update kwargs with expanded baseline
            kwargs["target"] = target

        def compute_expected_attribution_and_sq(attribution):
            # Fold the flat (bsz * n_samples, ...) attribution back into
            # (bsz, n_samples, ...) and reduce over the samples axis.
            bsz = attribution.shape[0] // n_samples
            attribution_shape = (bsz, n_samples)
            if len(attribution.shape) > 1:
                attribution_shape += attribution.shape[1:]

            attribution = attribution.view(attribution_shape)
            expected_attribution = attribution.mean(dim=1, keepdim=False)
            expected_attribution_sq = torch.mean(attribution ** 2, dim=1, keepdim=False)
            return expected_attribution, expected_attribution_sq

        # Keeps track whether original input is a tuple or not before
        # converting it into a tuple.
        is_inputs_tuple = isinstance(inputs, tuple)

        inputs = _format_input(inputs)

        _validate_noise_tunnel_type(nt_type, SUPPORTED_NOISE_TUNNEL_TYPES)

        delta = None
        inputs_with_noise = add_noise_to_inputs()
        # if the algorithm supports targets, baselines and/or additional_forward_args
        # they will be expanded based on the n_steps and corresponding kwargs
        # variables will be updated accordingly
        expand_and_update_baselines()
        expand_and_update_additional_forward_args()
        expand_and_update_target()
        # smoothgrad_Attr(x) = 1 / n * sum(Attr(x + N(0, sigma^2))
        # NOTE: using __wrapped__ such that it does not log the inner logs
        attributions = self.attribution_method.attribute.__wrapped__(  # type: ignore
            self.attribution_method,  # self
            inputs_with_noise if is_inputs_tuple else inputs_with_noise[0],
            **kwargs,
        )

        return_convergence_delta = (
            "return_convergence_delta" in kwargs and kwargs["return_convergence_delta"]
        )

        if self.is_delta_supported and return_convergence_delta:
            attributions, delta = attributions

        is_attrib_tuple = _is_tuple(attributions)
        attributions = _format_tensor_into_tuples(attributions)

        expected_attributions = []
        expected_attributions_sq = []
        for attribution in attributions:
            expected_attr, expected_attr_sq = compute_expected_attribution_and_sq(
                attribution
            )
            expected_attributions.append(expected_attr)
            expected_attributions_sq.append(expected_attr_sq)

        if NoiseTunnelType[nt_type] == NoiseTunnelType.smoothgrad:
            return self._apply_checks_and_return_attributions(
                tuple(expected_attributions),
                is_attrib_tuple,
                return_convergence_delta,
                delta,
            )

        if NoiseTunnelType[nt_type] == NoiseTunnelType.smoothgrad_sq:
            return self._apply_checks_and_return_attributions(
                tuple(expected_attributions_sq),
                is_attrib_tuple,
                return_convergence_delta,
                delta,
            )

        # vargrad: Var[Attr] = E[Attr^2] - E[Attr]^2
        vargrad = tuple(
            expected_attribution_sq - expected_attribution * expected_attribution
            for expected_attribution, expected_attribution_sq in zip(
                expected_attributions, expected_attributions_sq
            )
        )

        return self._apply_checks_and_return_attributions(
            vargrad, is_attrib_tuple, return_convergence_delta, delta
        )

    def _apply_checks_and_return_attributions(
        self,
        attributions: Tuple[Tensor, ...],
        is_attrib_tuple: bool,
        return_convergence_delta: bool,
        delta: Union[None, Tensor],
    ):
        # Restore the caller's original container shape (tensor vs tuple) and
        # attach the delta only when both supported and requested.
        attributions = _format_attributions(is_attrib_tuple, attributions)

        return (
            (attributions, delta)
            if self.is_delta_supported and return_convergence_delta
            else attributions
        )

    def has_convergence_delta(self) -> bool:
        # Delegates to the wrapped attribution method's capability.
        return self.is_delta_supported
| [
"torch.mean",
"numpy.random.choice",
"captum.log.log_usage",
"torch.tensor",
"torch.normal"
] | [((2433, 2444), 'captum.log.log_usage', 'log_usage', ([], {}), '()\n', (2442, 2444), False, 'from captum.log import log_usage\n'), ((8839, 8870), 'torch.normal', 'torch.normal', (['(0)', 'stdev_expanded'], {}), '(0, stdev_expanded)\n', (8851, 8870), False, 'import torch\n'), ((11995, 12045), 'torch.mean', 'torch.mean', (['(attribution ** 2)'], {'dim': '(1)', 'keepdim': '(False)'}), '(attribution ** 2, dim=1, keepdim=False)\n', (12005, 12045), False, 'import torch\n'), ((8457, 8497), 'torch.tensor', 'torch.tensor', (['stdev'], {'device': 'input.device'}), '(stdev, device=input.device)\n', (8469, 8497), False, 'import torch\n'), ((9119, 9169), 'numpy.random.choice', 'np.random.choice', (['num_ref_samples', '(n_samples * bsz)'], {}), '(num_ref_samples, n_samples * bsz)\n', (9135, 9169), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy import int64
# Logger
from logging import getLogger
logger = getLogger(__name__)
# Dictionary for pol labels and their IDs in UVFITS (AIPS Memo 117):
# positive codes are Stokes parameters, negative codes are correlation
# products.
polid2name = {
    "+1": "I",
    "+2": "Q",
    "+3": "U",
    "+4": "V",
    "-1": "RR",
    "-2": "LL",
    "-3": "RL",
    "-4": "LR",
    "-5": "XX",
    "-6": "YY",
    "-7": "XY",
    "-8": "YX",
}
# Reverse lookup: pol label -> numeric UVFITS ID (e.g. "RR" -> -1).
# int64 parses the signed string keys ("+1", "-5", ...) directly.
polname2id = {name: int64(polid) for polid, name in polid2name.items()}
def uvfits2UVData(inuvfits, scangap=None, nseg=2, outfile=None, group="", format="netcdf", mode="w", printlevel=0):
    """
    Load an uvfits file. Currently, this function can read only single-source,
    single-frequency-setup, single-array data correctly.

    Args:
        inuvfits (string or pyfits.HDUList object):
            Input uvfits data
        scangap (float or astropy.units.Quantity, optional):
            Minimal time seperation between scans.
            If not specfied, this will be guessed from data segmentation (see nseg).
            If a float value is specified, its unit is assumuted to be in seconds.
            Defaults to None.
        nseg (float, optional):
            If scangap is None, the minimal time seperation between scans
            will be set to nseg * minimal_data_segementation_time.
            Defaults to 2.
        outfile (string, optional):
            If given, the loaded datasets are also written to this file;
            otherwise an in-memory UVData object is returned.
        group (string, optional):
            Root group name under which the datasets are stored in outfile.
        format (string, optional):
            Output file format, either "zarr" or "netcdf". Defaults to "netcdf".
        mode (string, optional):
            File mode for outfile; "w" truncates an existing file first.
        printlevel (integer, optional):
            print some notes. 0: silient 3: maximum level
    Returns:
        uvdata.UVData object
    """
    import astropy.io.fits as pf
    from ..uvdata import UVData

    if format not in ["zarr", "netcdf"]:
        raise ValueError("available format is 'zarr' or 'netcdf'.")

    # check input files: accept a filename or an already-opened HDUList
    if isinstance(inuvfits, str):
        hdulist = pf.open(inuvfits)
        closehdu = True
    else:
        hdulist = inuvfits
        closehdu = False

    # print HDU info if requested.
    if printlevel > 0:
        hdulist.info()
        print("")

    # load data
    ghdu, antab, fqtab = uvfits2HDUs(hdulist)

    # create (truncate) the output file before datasets are appended to it
    if outfile is not None and mode == "w":
        if format == "zarr":
            import zarr
            zarr.open(outfile, mode="w")
        elif format == "netcdf":
            import netCDF4
            d = netCDF4.Dataset(outfile, mode="w")
            d.close()

    def save_ds(ds):
        # Append a dataset to the output file (if any) under group/ds.group.
        import os
        groupname = os.path.join(group, ds.group)
        if outfile is not None:
            if format == "zarr":
                ds.to_zarr(outfile, group=groupname, mode="a")
            elif format == "netcdf":
                ds.to_netcdf(outfile, group=groupname, mode="a")
        del ds

    # Load info from HDU
    # Frequency
    freqds = uvfits2freq(ghdu=ghdu, antab=antab, fqtab=fqtab)
    del fqtab
    save_ds(freqds)
    # Antenna
    antds = uvfits2ant(antab=antab)
    del antab
    save_ds(antds)
    # Source
    srcds = uvfits2src(ghdu=ghdu)
    save_ds(srcds)
    # Visibilities
    visds = uvfits2vis(ghdu=ghdu)
    del ghdu

    # close HDU if this is loaded from a file
    if closehdu:
        hdulist.close()

    # Detect scans and save visibilities and scaninfo to the output file.
    # Bugfix: the user-supplied ``nseg`` is now forwarded; previously a
    # hard-coded 2 was passed, silently ignoring the parameter.
    visds.set_scan(scangap=scangap, nseg=nseg)
    save_ds(visds)
    scands = visds.gen_scandata()
    save_ds(scands)

    if outfile is None:
        return UVData(
            freq=freqds,
            src=srcds,
            scan=scands,
            vis=visds,
            ant=antds
        )
    else:
        if format == "zarr":
            from .zarr import zarr2UVData
            return zarr2UVData(outfile, group=group)
        elif format == "netcdf":
            from .netcdf import netcdf2UVData
            return netcdf2UVData(outfile, group=group)
def uvfits2HDUs(hdulist):
    """
    Read HDUList, and get PrimaryHDU & HDUS for AIPS AN/FQ Tables
    Args:
        hdulist (astropy.io.fits.HDUList): hdulist
    Returns:
        Group HDU
        HDU for AIPS AN Table
        HDU for AIPS FQ Table
    """
    ghdu = None
    antab = None
    fqtab = None
    # Scan the HDU summary; when a table kind appears twice, the later
    # occurrence wins (with a warning).
    for idx, hduinfo in enumerate(hdulist.info(output=False)):
        label = hduinfo[1].upper()
        if "PRIMARY" in label:
            if ghdu is not None:
                logger.warning("This UVFITS has more than two Primary HDUs.")
                logger.warning("The later one will be taken.")
            ghdu = hdulist[idx]
        elif "FQ" in label:
            if fqtab is not None:
                logger.warning("This UVFITS has more than two AIPS FQ tables.")
                logger.warning("The later one will be taken.")
            fqtab = hdulist[idx]
        elif "AN" in label:
            if antab is not None:
                logger.warning("This UVFITS has more than two AIPS AN tables.")
                logger.warning("The later one will be taken.")
            antab = hdulist[idx]
    return ghdu, antab, fqtab
def uvfits2vis(ghdu):
    """
    Load the complex visibilities from the uvfits Group (random-groups) HDU
    into the SMILI format.
    Args:
        ghdu (astropy.io.fits.HDU): Group (Primary) HDU
    Returns:
        VisData: complex visibility in SMILI format
    """
    from ..vis.vis import VisData
    from astropy.time import Time
    from xarray import Dataset
    from numpy import float64, int32, int64, zeros, where, power
    from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff
    # read visibilities
    # uvfits's original dimension is [data,dec,ra,if,ch,pol,complex]
    Ndata, Ndec, Nra, dammy, dammy, Npol, dammy = ghdu.data.data.shape
    del dammy
    if Nra > 1 or Ndec > 1:
        logger.warning(
            "GroupHDU has more than single coordinates (Nra, Ndec)=(%d, %d)." % (Nra, Ndec))
        logger.warning("We will pick up only the first one.")
    vis_ghdu = ghdu.data.data[:, 0, 0, :]  # to [data,if,ch,pol,complex]
    # get visibilities, errors, and flag (flagged, removed,)
    # last axis holds (real, imag, weight); sigma = weight**-0.5, and the
    # sign of the weight is the flag (negative weights mean flagged data)
    vcmp = float64(vis_ghdu[:, :, :, :, 0]) + 1j * \
        float64(vis_ghdu[:, :, :, :, 1])
    sigma = float64(power(abs(vis_ghdu[:, :, :, :, 2]), -0.5))
    flag = int32(sign(vis_ghdu[:, :, :, :, 2]))
    # check sigma: zero out entries whose weight was 0 (inf/NaN sigma) or
    # whose sigma underflows to (near) zero
    idx = where(isinf(sigma))
    sigma[idx] = 0
    flag[idx] = 0
    idx = where(isnan(sigma))
    sigma[idx] = 0
    flag[idx] = 0
    idx = where(sigma < finfo(float64).eps)
    sigma[idx] = 0
    flag[idx] = 0
    # Read Random Parameters
    # paridxes records the (1-based) column index of each recognized random
    # parameter: [UU, VV, WW, DATE1, DATE2, BASELINE, SOURCE, INTTIM, FREQSEL]
    paridxes = [None for i in range(9)]
    parnames = ghdu.data.parnames
    Npar = len(parnames)
    jd1 = zeros(Ndata)
    jd2 = zeros(Ndata)
    for i in range(Npar):
        parname = parnames[i]
        if "UU" in parname:
            paridxes[0] = i+1
            usec = float64(ghdu.data.par(i))
        if "VV" in parname:
            paridxes[1] = i+1
            vsec = float64(ghdu.data.par(i))
        if "WW" in parname:
            paridxes[2] = i+1
            wsec = float64(ghdu.data.par(i))
        if "DATE" in parname:
            # The date may be split into two columns (jd1 + jd2) for
            # precision; accept at most two DATE columns.
            if paridxes[3] is None:
                paridxes[3] = i+1
                jd1 = float64(ghdu.data.par(i))
            elif paridxes[4] is None:
                paridxes[4] = i+1
                jd2 = float64(ghdu.data.par(i))
            else:
                errmsg = "Random Parameters have too many 'DATE' columns."
                raise ValueError(errmsg)
        if "BASELINE" in parname:
            paridxes[5] = i+1
            bl = float64(ghdu.data.par(i))
        if "SOURCE" in parname:
            paridxes[6] = i+1
            srcid = int32(ghdu.data.par(i))
        if "INTTIM" in parname:
            paridxes[7] = i+1
            inttim = float64(ghdu.data.par(i))
        if "FREQSEL" in parname:
            paridxes[8] = i+1
            freqsel = int32(ghdu.data.par(i))
    # convert JD to MJD
    mjd = Time(jd1, jd2, format="jd").mjd
    # warn if it is an apparently multi source file
    if paridxes[6] is not None:
        if len(unique(srcid)) > 1:
            logger.warning(
                "Group HDU contains data on more than a single source.")
            logger.warning(
                "It will likely cause a problem since SMILI assumes a singlesource UVFITS.")
    # Integration time in the unit of day
    if paridxes[7] is None:
        logger.warning(
            "Group HDU do not have a random parameter for the integration time.")
        logger.warning(
            "It will be estimated with a minimal time interval of data.")
        dmjd = min(abs(diff(unique(mjd))))
    else:
        dmjd = inttim/86400
    # warn if data are apparently with multi IF setups
    if paridxes[8] is not None:
        if len(unique(freqsel)) > 1:
            logger.warning(
                "Group HDU contains data on more than a frequency setup.")
            logger.warning(
                "It will likely cause a problem since SMILI assumes a UVFITS with a single setup.")
    # antenna ID
    # decode the packed BASELINE value: the fractional part encodes the
    # subarray, the integer part is 256*ant1 + ant2 (antenna IDs 0-based here)
    subarray, bl = modf(bl)
    subarray = int64(100*(subarray)+1)
    antid1 = int64(bl//256)-1
    antid2 = int64(bl % 256)-1
    if len(unique(subarray)) > 1:
        logger.warning("Group HDU contains data with 2 or more subarrays.")
        logger.warning(
            "It will likely cause a problem, since SMILI assumes UVFITS for a single subarray.")
    # read polarizations from the FITS WCS on axis 3 (CRVAL/CRPIX/CDELT)
    polids = ghdu.header["CDELT3"] * \
        (arange(Npol)+1-ghdu.header["CRPIX3"])+ghdu.header["CRVAL3"]
    pol = [polid2name["%+d" % (polid)] for polid in polids]
    # form a data array
    ds = Dataset(
        data_vars=dict(
            vis=(["data", "spw", "ch", "pol"], vcmp)
        ),
        coords=dict(
            mjd=("data", mjd),
            dmjd=("data", dmjd),
            usec=("data", usec),
            vsec=("data", vsec),
            wsec=("data", wsec),
            antid1=("data", antid1),
            antid2=("data", antid2),
            flag=(["data", "spw", "ch", "pol"], flag),
            sigma=(["data", "spw", "ch", "pol"], sigma),
            pol=(["pol"], pol),
        )
    )
    # sort rows by time, then baseline, before wrapping in VisData
    return VisData(ds=ds.sortby(["mjd", "antid1", "antid2"]))
def uvfits2ant(antab):
    """
    Load the array information from uvfits's AIPS AN table into the SMILI format.
    Args:
        antab (astropy.io.fits.HDU): HDU for AIPS AN table
    Returns:
        AntData: array information in SMILI format
    """
    from numpy import asarray, zeros, ones, unique
    from ..ant.ant import AntData
    from xarray import Dataset

    # Array name and per-station metadata
    name = antab.header["ARRNAM"]
    Nant = len(antab.data)
    antname = antab.data["ANNAME"].tolist()
    xyz = antab.data["STABXYZ"]

    # Field-rotation coefficients per mount type (see AIPS MEMO 117).
    # MNTSTA codes: 0: ALT-AZ, 1: Eq, 2: Orbit, 3: X-Y, 4: Naismith-R,
    # 5: Naismith-L, 6: Manual.  Mapping: code -> (pa coeff, el coeff).
    supported_mounts = {
        0: (1, 0),   # azel
        1: (0, 0),   # Equatorial
        4: (1, 1),   # Nasmyth-R
        5: (1, -1),  # Nasmyth-L
    }
    mntsta = antab.data["MNTSTA"]
    fr_pa_coeff = ones(Nant)
    fr_el_coeff = zeros(Nant)
    fr_offset = zeros(Nant)
    for i in range(Nant):
        try:
            fr_pa_coeff[i], fr_el_coeff[i] = supported_mounts[mntsta[i]]
        except KeyError:
            # Unsupported mount: keep the defaults and warn.
            logger.warning("MNTSTA %d at Station %s is not supported currently." % (
                mntsta[i], antname[i]))

    # check polarization: SMILI assumes a single label pair across stations
    pola = unique(antab.data["POLTYA"])
    polb = unique(antab.data["POLTYB"])
    if len(pola) > 1 or len(polb) > 1:
        msg = "POLTYA or POLTYB have more than a single polarization"
        logger.error(msg)
        raise ValueError(msg)
    pol = [pola[0], polb[0]]

    # assume all of the stations are ground arrays
    anttype = asarray(["g"] * Nant, dtype="U8")

    antdata = AntData(
        ds=Dataset(
            coords=dict(
                antname=("ant", antname),
                x=("ant", xyz[:, 0]),
                y=("ant", xyz[:, 1]),
                z=("ant", xyz[:, 2]),
                fr_pa_coeff=("ant", fr_pa_coeff),
                fr_el_coeff=("ant", fr_el_coeff),
                fr_offset=("ant", fr_offset),
                anttype=("ant", anttype),
                pol=("pol", pol)
            ),
            attrs=dict(
                name=name,
            ),
        )
    )
    antdata.init_coords()
    return antdata
def uvfits2freq(ghdu, antab, fqtab):
    """
    Load the frequency information from uvfits HDUs into the SMILI format.
    Args:
        ghdu (astropy.io.fits.HDU): Group (Primary) HDU
        antab (astropy.io.fits.HDU): HDU for AIPS AN table
        fqtab (astropy.io.fits.HDU): HDU for AIPS FQ table
    Returns:
        FreqData: Loaded frequency table
    """
    from ..freq import FreqData
    from xarray import Dataset
    from numpy import float64

    # Reference frequency (from the AN table) and the array name, which
    # stands in for a proper dataset name since uvfits has none.
    reffreq = antab.header["FREQ"]
    name = antab.header["ARRNAM"]

    # Number of spectral windows (IFs) and channels in the visibility cube.
    _, _, _, Nspw, Nch, _, _ = ghdu.data.data.shape

    # read data from frequency table
    nfrqsel = len(fqtab.data["FRQSEL"])
    if nfrqsel > 1:
        logger.warning(
            "Input FQ Tables have more than single FRQSEL. We only handle a uvfits with single FRQSEL.")

    def _as_1d(value):
        # FITS returns a scalar when there is a single IF; normalize to 1-D.
        from numpy import isscalar, array
        if isscalar(value):
            return array([value])
        return value

    spwfreq = _as_1d(float64(fqtab.data["IF FREQ"][0]))
    chbw = _as_1d(float64(fqtab.data["CH WIDTH"][0]))
    sideband = _as_1d(float64(fqtab.data["SIDEBAND"][0]))

    # check the consistency between the number of if in FQ Table and GroupHDU
    if len(spwfreq) != Nspw:
        raise ValueError(
            "Group HDU has %d IFs, which is inconsistent with FQ table with %d IFs" % (
                Nspw, len(spwfreq))
        )

    # Make FreqTable
    freq = FreqData(Dataset(
        coords=dict(
            spw_freq=("spw", reffreq+spwfreq),
            ch_bw=("spw", chbw),
            sideband=("spw", sideband)
        ),
        attrs=dict(
            name=name,
            Nch=Nch,
        )
    ))
    freq.recalc_freq()
    return freq
def uvfits2src(ghdu):
    """
    Load the source information from uvfits HDUs into the SMILI format.
    Args:
        ghdu (astropy.io.fits.HDU): Group (Primary) HDU
    Returns:
        SrcData: Loaded source information
    """
    from ..src.src import SrcData
    from xarray import Dataset

    header = ghdu.header
    srcname = header["OBJECT"]
    ra = header["CRVAL6"]
    dec = header["CRVAL7"]

    # The equinox may be recorded under either keyword; both imply FK5.
    if "EQUINOX" in header.keys():
        equinox = header["EQUINOX"]
        coordsys = "fk5"
    elif "EPOCH" in header.keys():
        equinox = header["EPOCH"]
        coordsys = "fk5"
    else:
        equinox = -1
    # A missing or negative equinox is treated as ICRS coordinates.
    if equinox < 0:
        equinox = -1
        coordsys = "icrs"

    attrs = dict(
        name=srcname,
        ra=ra,
        dec=dec,
        equinox=equinox,
        coordsys=coordsys,
    )
    return SrcData(Dataset(attrs=attrs))
| [
"logging.getLogger",
"numpy.array",
"astropy.io.fits.open",
"numpy.arange",
"numpy.int64",
"numpy.isscalar",
"numpy.float64",
"netCDF4.Dataset",
"numpy.isinf",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"numpy.sign",
"zarr.open",
"numpy.finfo",
"numpy.modf",
"numpy.unique",
"os.path... | [((119, 138), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (128, 138), False, 'from logging import getLogger\n'), ((478, 488), 'numpy.int64', 'int64', (['key'], {}), '(key)\n', (483, 488), False, 'from numpy import float64, int32, int64, zeros, where, power\n'), ((6607, 6619), 'numpy.zeros', 'zeros', (['Ndata'], {}), '(Ndata)\n', (6612, 6619), False, 'from numpy import asarray, zeros, ones, unique\n'), ((6630, 6642), 'numpy.zeros', 'zeros', (['Ndata'], {}), '(Ndata)\n', (6635, 6642), False, 'from numpy import asarray, zeros, ones, unique\n'), ((8999, 9007), 'numpy.modf', 'modf', (['bl'], {}), '(bl)\n', (9003, 9007), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n'), ((9023, 9048), 'numpy.int64', 'int64', (['(100 * subarray + 1)'], {}), '(100 * subarray + 1)\n', (9028, 9048), False, 'from numpy import float64, int32, int64, zeros, where, power\n'), ((10960, 10970), 'numpy.ones', 'ones', (['Nant'], {}), '(Nant)\n', (10964, 10970), False, 'from numpy import asarray, zeros, ones, unique\n'), ((10989, 11000), 'numpy.zeros', 'zeros', (['Nant'], {}), '(Nant)\n', (10994, 11000), False, 'from numpy import asarray, zeros, ones, unique\n'), ((11017, 11028), 'numpy.zeros', 'zeros', (['Nant'], {}), '(Nant)\n', (11022, 11028), False, 'from numpy import asarray, zeros, ones, unique\n'), ((11642, 11670), 'numpy.unique', 'unique', (["antab.data['POLTYA']"], {}), "(antab.data['POLTYA'])\n", (11648, 11670), False, 'from numpy import asarray, zeros, ones, unique\n'), ((11682, 11710), 'numpy.unique', 'unique', (["antab.data['POLTYB']"], {}), "(antab.data['POLTYB'])\n", (11688, 11710), False, 'from numpy import asarray, zeros, ones, unique\n'), ((1773, 1790), 'astropy.io.fits.open', 'pf.open', (['inuvfits'], {}), '(inuvfits)\n', (1780, 1790), True, 'import astropy.io.fits as pf\n'), ((2401, 2430), 'os.path.join', 'os.path.join', (['group', 'ds.group'], {}), '(group, ds.group)\n', (2413, 2430), False, 
'import os\n'), ((6038, 6070), 'numpy.float64', 'float64', (['vis_ghdu[:, :, :, :, 0]'], {}), '(vis_ghdu[:, :, :, :, 0])\n', (6045, 6070), False, 'from numpy import float64\n'), ((6201, 6230), 'numpy.sign', 'sign', (['vis_ghdu[:, :, :, :, 2]'], {}), '(vis_ghdu[:, :, :, :, 2])\n', (6205, 6230), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n'), ((6267, 6279), 'numpy.isinf', 'isinf', (['sigma'], {}), '(sigma)\n', (6272, 6279), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n'), ((6335, 6347), 'numpy.isnan', 'isnan', (['sigma'], {}), '(sigma)\n', (6340, 6347), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n'), ((7876, 7903), 'astropy.time.Time', 'Time', (['jd1', 'jd2'], {'format': '"""jd"""'}), "(jd1, jd2, format='jd')\n", (7880, 7903), False, 'from astropy.time import Time\n'), ((9060, 9076), 'numpy.int64', 'int64', (['(bl // 256)'], {}), '(bl // 256)\n', (9065, 9076), False, 'from numpy import float64, int32, int64, zeros, where, power\n'), ((9090, 9105), 'numpy.int64', 'int64', (['(bl % 256)'], {}), '(bl % 256)\n', (9095, 9105), False, 'from numpy import float64, int32, int64, zeros, where, power\n'), ((13717, 13732), 'numpy.isscalar', 'isscalar', (['input'], {}), '(input)\n', (13725, 13732), False, 'from numpy import isscalar, array\n'), ((13831, 13864), 'numpy.float64', 'float64', (["fqtab.data['IF FREQ'][0]"], {}), "(fqtab.data['IF FREQ'][0])\n", (13838, 13864), False, 'from numpy import float64\n'), ((13887, 13921), 'numpy.float64', 'float64', (["fqtab.data['CH WIDTH'][0]"], {}), "(fqtab.data['CH WIDTH'][0])\n", (13894, 13921), False, 'from numpy import float64\n'), ((13948, 13982), 'numpy.float64', 'float64', (["fqtab.data['SIDEBAND'][0]"], {}), "(fqtab.data['SIDEBAND'][0])\n", (13955, 13982), False, 'from numpy import float64\n'), ((2179, 2207), 'zarr.open', 'zarr.open', (['outfile'], {'mode': '"""w"""'}), "(outfile, mode='w')\n", 
(2188, 2207), False, 'import zarr\n'), ((6088, 6120), 'numpy.float64', 'float64', (['vis_ghdu[:, :, :, :, 1]'], {}), '(vis_ghdu[:, :, :, :, 1])\n', (6095, 6120), False, 'from numpy import float64\n'), ((6147, 6175), 'numpy.abs', 'abs', (['vis_ghdu[:, :, :, :, 2]'], {}), '(vis_ghdu[:, :, :, :, 2])\n', (6150, 6175), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n'), ((9119, 9135), 'numpy.unique', 'unique', (['subarray'], {}), '(subarray)\n', (9125, 9135), False, 'from numpy import asarray, zeros, ones, unique\n'), ((13753, 13767), 'numpy.array', 'array', (['[input]'], {}), '([input])\n', (13758, 13767), False, 'from numpy import isscalar, array\n'), ((2284, 2318), 'netCDF4.Dataset', 'netCDF4.Dataset', (['outfile'], {'mode': '"""w"""'}), "(outfile, mode='w')\n", (2299, 2318), False, 'import netCDF4\n'), ((6411, 6425), 'numpy.finfo', 'finfo', (['float64'], {}), '(float64)\n', (6416, 6425), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n'), ((8008, 8021), 'numpy.unique', 'unique', (['srcid'], {}), '(srcid)\n', (8014, 8021), False, 'from numpy import asarray, zeros, ones, unique\n'), ((8709, 8724), 'numpy.unique', 'unique', (['freqsel'], {}), '(freqsel)\n', (8715, 8724), False, 'from numpy import asarray, zeros, ones, unique\n'), ((8553, 8564), 'numpy.unique', 'unique', (['mjd'], {}), '(mjd)\n', (8559, 8564), False, 'from numpy import asarray, zeros, ones, unique\n'), ((9413, 9425), 'numpy.arange', 'arange', (['Npol'], {}), '(Npol)\n', (9419, 9425), False, 'from numpy import abs, sign, isinf, isnan, finfo, unique, modf, arange, min, diff\n')] |
"""Example of Neural Network Analysis"""
# Dependencies
from sklearn.neural_network import MLPRegressor
import numpy as np
# Questionaire data (WEEK, YEARS, BOOKS, PROJECTS, EARN, RATING)
X = np.array(
[[20, 11, 20, 30, 4000, 3000],
[12, 4, 0, 0, 1000, 1500],
[2, 0, 1, 10, 0, 1400],
[35, 5, 10, 70, 6000, 3800],
[30, 1, 4, 65, 0, 3900],
[35, 1, 0, 0, 0, 100],
[15, 1, 2, 25, 0, 3700],
[40, 3, -1, 60, 1000, 2000],
[40, 1, 2, 95, 0, 1000],
[10, 0, 0, 0, 0, 1400],
[30, 1, 0, 50, 0, 1700],
[1, 0, 0, 45, 0, 1762],
[10, 32, 10, 5, 0, 2400],
[5, 35, 4, 0, 13000, 3900],
[8, 9, 40, 30, 1000, 2625],
[1, 0, 1, 0, 0, 1900],
[1, 30, 10, 0, 1000, 1900],
[7, 16, 5, 0, 0, 3000]])
# One-liner
neural_net = MLPRegressor(max_iter=10000).fit(X[:,:-1], X[:,-1])
# Result
res = neural_net.predict([[20, 1, 10, 50, 1000]])
print(res) | [
"numpy.array",
"sklearn.neural_network.MLPRegressor"
] | [((194, 697), 'numpy.array', 'np.array', (['[[20, 11, 20, 30, 4000, 3000], [12, 4, 0, 0, 1000, 1500], [2, 0, 1, 10, 0, \n 1400], [35, 5, 10, 70, 6000, 3800], [30, 1, 4, 65, 0, 3900], [35, 1, 0,\n 0, 0, 100], [15, 1, 2, 25, 0, 3700], [40, 3, -1, 60, 1000, 2000], [40, \n 1, 2, 95, 0, 1000], [10, 0, 0, 0, 0, 1400], [30, 1, 0, 50, 0, 1700], [1,\n 0, 0, 45, 0, 1762], [10, 32, 10, 5, 0, 2400], [5, 35, 4, 0, 13000, 3900\n ], [8, 9, 40, 30, 1000, 2625], [1, 0, 1, 0, 0, 1900], [1, 30, 10, 0, \n 1000, 1900], [7, 16, 5, 0, 0, 3000]]'], {}), '([[20, 11, 20, 30, 4000, 3000], [12, 4, 0, 0, 1000, 1500], [2, 0, 1,\n 10, 0, 1400], [35, 5, 10, 70, 6000, 3800], [30, 1, 4, 65, 0, 3900], [35,\n 1, 0, 0, 0, 100], [15, 1, 2, 25, 0, 3700], [40, 3, -1, 60, 1000, 2000],\n [40, 1, 2, 95, 0, 1000], [10, 0, 0, 0, 0, 1400], [30, 1, 0, 50, 0, 1700\n ], [1, 0, 0, 45, 0, 1762], [10, 32, 10, 5, 0, 2400], [5, 35, 4, 0, \n 13000, 3900], [8, 9, 40, 30, 1000, 2625], [1, 0, 1, 0, 0, 1900], [1, 30,\n 10, 0, 1000, 1900], [7, 16, 5, 0, 0, 3000]])\n', (202, 697), True, 'import numpy as np\n'), ((788, 816), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'max_iter': '(10000)'}), '(max_iter=10000)\n', (800, 816), False, 'from sklearn.neural_network import MLPRegressor\n')] |
import numpy as np
import random
class rps_game():
def __init__(self):
self.number_of_players = 2
#self.state = np.zeros(2)
self.reward = np.zeros(2)
self.done = False
def step(self, action):
#self.state = action#np.array((action[0]*action[1],action[0]*action[1]))
if action[0] == 0:
if action[1] ==0:
reward = 0
elif action[1] ==1:
reward = 1
else:
reward = -1
elif action[0] == 1:
if action[1] == 0:
reward = -1
elif action[1] ==1:
reward = 0
else:
reward = 1
else:
if action[1] == 0:
reward = 1
elif action[1] ==1:
reward = -1
else:
reward = 0
self.reward = np.array((-reward,reward))
info = {}
return 0, self.reward[0], self.reward[1], True, info
def reset(self):
#self.state = np.zeros(2)
self.reward = np.zeros(2)
self.done = False
info = {}
return 0, self.reward[0], self.reward[1], self.done, info
class nz_rps_game():
def __init__(self):
self.number_of_players = 2
#self.state = np.zeros(2)
self.reward = np.zeros(2)
self.done = False
def step(self, action):
#self.state = action#np.array((action[0]*action[1],action[0]*action[1]))
if action[0] == 0:
if action[1] ==0:
reward1 = 0
reward2 = 0
elif action[1] ==1:
reward1 = 2
reward2 = -1
else:
reward1 = 1
reward2 = -2
elif action[0] == 1:
if action[1] == 0:
reward1 = 1
reward2 = -2
elif action[1] ==1:
reward1 = 0
reward2 = 0
else:
reward1 = 2
reward2 = -1
else:
if action[1] == 0:
reward1 = 2
reward2 = -1
elif action[1] ==1:
reward1 = 1
reward2 = -2
else:
reward1 = 0
reward2 = 0
self.reward = np.array((reward1,reward2))
info = {}
return 0, self.reward[0], self.reward[1], True, info
def reset(self):
#self.state = np.zeros(2)
self.reward = np.zeros(2)
self.done = False
info = {}
return 0, self.reward[0], self.reward[1], self.done, info
if __name__ == '__main__':
env = rps_game()
for i in range(10):
state, _, _, _, _ = env.reset()
action = (np.random.randint(3,size=2)).reshape(2)
state, reward1, reward2, done, _ = env.step(action)
print(action, reward1, reward2) | [
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
] | [((166, 177), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (174, 177), True, 'import numpy as np\n'), ((895, 922), 'numpy.array', 'np.array', (['(-reward, reward)'], {}), '((-reward, reward))\n', (903, 922), True, 'import numpy as np\n'), ((1079, 1090), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1087, 1090), True, 'import numpy as np\n'), ((1338, 1349), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1346, 1349), True, 'import numpy as np\n'), ((2332, 2360), 'numpy.array', 'np.array', (['(reward1, reward2)'], {}), '((reward1, reward2))\n', (2340, 2360), True, 'import numpy as np\n'), ((2517, 2528), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2525, 2528), True, 'import numpy as np\n'), ((2771, 2799), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'size': '(2)'}), '(3, size=2)\n', (2788, 2799), True, 'import numpy as np\n')] |
import numpy as np
from collections import defaultdict
import random
import os
'''
code book:
circle: 0
rect: 1
tri: 2
ellipse: 3
star: 4
loop: 5
red: 6
green: 7
blue: 8
yellow: 9
cyan: 10
magenta: 11
large: 12
small: 13
upper_left: 14
upper_right: 15
lower_left: 16
lower_right: 17
'''
def get_combination(samples, num):
if num == 0:
return [[]]
else:
combinations = []
while len(samples) > 0:
s = samples[0]
samples = samples[1:]
sub_combinations = get_combination(samples, num - 1)
combinations.extend([sc + [s] for sc in sub_combinations])
return combinations
class Concept:
def __init__(self, game_config):
self.attributes = game_config.attributes
self.num_distractors = game_config.num_distractors
self.concept_size = 4 # color, shape, size, position
self.dataset_attributes_path = game_config.dataset_attributes_path
self.img_h = game_config.img_h
self.img_w = game_config.img_w
self.save_dir = game_config.save_dir
if not os.path.exists(self.dataset_attributes_path):
raise Exception('dataset_attributes_path does not exist')
self.dataset_attributes = np.load(self.dataset_attributes_path)
assert(self.dataset_attributes.shape[1] == len(self.attributes))
self.data_count = self.dataset_attributes.shape[0]
if 'images_path' in game_config and game_config.images_path is not None:
self.img_as_data = True
self.images_path = game_config.images_path
if not os.path.exists(self.images_path):
raise Exception('images_path does not exist')
self.images = np.load(self.images_path)
assert(self.images.shape[1] == self.img_h and self.images.shape[2] == self.img_w)
assert(self.images.shape[0] == self.data_count)
else:
if 'feat_dir' in game_config and game_config.feat_dir is not None:
load_dir = game_config.feat_dir
else:
load_dir = self.save_dir
self.img_as_data = False
self.teacher_feat_path = os.path.join(load_dir, 'Geometry_Teacher_features.npy')
self.student_feat_path = os.path.join(load_dir, 'Geometry_Student_features.npy')
if not os.path.exists(self.teacher_feat_path) or not os.path.exists(self.student_feat_path):
raise Exception('teacher_feat_path or student_feat_path does not exist')
self.teacher_features = np.load(self.teacher_feat_path)
self.student_features = np.load(self.student_feat_path)
assert(self.teacher_features.shape == self.student_features.shape)
assert(self.teacher_features.shape[0] == self.data_count)
def store_features(self, teacher, student):
def helper(agent):
save_path = os.path.join(self.save_dir, 'Geometry_%s_features.npy' % agent.role_)
mod = self.images.shape[0] % self.num_distractors
if mod == 0:
cnn_input = self.images.reshape([-1, self.num_distractors, *self.images.shape[1:]])
else:
dim_to_append = self.num_distractors - mod
padding = np.zeros([dim_to_append, *self.images.shape[1:]])
cnn_input = np.concatenate([self.images, padding], axis = 0).reshape([-1, self.num_distractors, *self.images.shape[1:]])
cnn_output = agent.sess_.run(agent.perception_core_.visual_features_, feed_dict = {agent.distractors_: cnn_input})
if mod == 0:
features = cnn_output.reshape([self.images.shape[0], cnn_output.shape[-1]])
else:
features = cnn_output.reshape([self.images.shape[0] + dim_to_append, cnn_output.shape[-1]])[:self.images.shape[0]]
np.save(save_path, features)
helper(teacher)
helper(student)
def rd_generate_concept(self):
chosen_idx = np.random.randint(0, self.data_count, size = self.num_distractors)
concept_embed = self.dataset_attributes[chosen_idx]
concepts = []
included_attributes = set()
for embed in concept_embed:
embed_attributes = np.where(embed)[0].tolist()
concepts.append(embed_attributes)
included_attributes.update(embed_attributes)
if self.img_as_data:
distractors = self.images[chosen_idx]
return concepts, list(included_attributes), distractors, distractors
else:
teacher_distractors = self.teacher_features[chosen_idx]
student_distractors = self.student_features[chosen_idx]
return concepts, list(included_attributes), teacher_distractors, student_distractors
def teaching_dim(self, concepts, included_attrs):
td_dict = {}
teaching_sample = defaultdict(list)
sample_size = 1
smallest_sample_size = self.concept_size
for i in range(len(concepts)):
for j in range(len(concepts)):
if set(concepts[i]).issubset(set(concepts[j])) and i != j:
td_dict[tuple(concepts[i])] = (self.concept_size, tuple(concepts[i]))
while len(td_dict) < len(concepts):
all_teaching_samples = get_combination(included_attrs, sample_size)
for ts in all_teaching_samples:
for concept in concepts:
if set(ts).issubset(set(concept)):
teaching_sample[tuple(ts)].append(concept)
for ts in teaching_sample:
if len(teaching_sample[ts]) == 1:
concept = teaching_sample[ts][0]
if td_dict.get(tuple(concept)) is None:
td_dict[tuple(concept)] = (sample_size, ts)
smallest_sample_size = min(smallest_sample_size, sample_size)
###
# if len(td_dict) == len(concepts):
# return True
# else:
# return False
###
sample_size += 1
###
# return False
###
return td_dict, smallest_sample_size
def recursive_teaching_dim(self, concepts, current_most = 0):
if len(concepts) == 0:
return current_most
included_attributes = []
for c in concepts:
for e in c:
included_attributes.append(e)
included_attributes = list(set(included_attributes))
td_dict, smallest_sample_size = self.teaching_dim(concepts, included_attributes)
new_concepts = [c for c in concepts if td_dict[tuple(c)][0] > smallest_sample_size]
return self.recursive_teaching_dim(new_concepts, max(smallest_sample_size, current_most))
def bayesian_update(self, old_belief, concepts, info):
likelihood = []
for concept in concepts:
prob = 1.0 * (info in concept) / len(concept)
likelihood.append(prob)
new_belief = old_belief * np.array(likelihood)
new_belief /= np.sum(new_belief) + 1e-9
return new_belief
def generate_batch(self, batch_size, role, epsilon = 0.4):
data = {'prev_belief': [None] * batch_size,
'message': [None] * batch_size,
'distractors': [None] * batch_size,
'new_belief': [None] * batch_size}
if self.img_as_data:
distractors = self.images
else:
if role == 'Teacher':
distractors = self.teacher_features
elif role == 'Student':
distractors = self.student_feat_path
else:
raise Exception('Wrong role passed in generate_batch')
for i in range(batch_size):
chosen_idx = np.random.randint(0, self.data_count, size = self.num_distractors)
concept_embed = self.dataset_attributes[chosen_idx]
concepts = []
included_attributes = set()
for embed in concept_embed:
embed_attributes = np.where(embed)[0].tolist()
concepts.append(embed_attributes)
included_attributes.update(embed_attributes)
included_attributes = list(included_attributes)
prev_belief = np.random.random(self.num_distractors)
prev_belief /= np.sum(prev_belief)
rd = np.random.choice(2, 1, p = [1 - epsilon, epsilon])
if rd == 1:
msg = included_attributes[np.random.randint(len(included_attributes))]
else:
msg = np.random.randint(len(self.attributes))
embeded_msg = np.zeros(len(self.attributes))
embeded_msg[msg] = 1
new_belief = self.bayesian_update(prev_belief, concepts, msg)
data['prev_belief'][i] = prev_belief
data['message'][i] = embeded_msg
data['distractors'][i] = distractors[chosen_idx]
data['new_belief'][i] = new_belief
for j in data:
data[j] = np.array(data[j])
return data
if __name__ == '__main__':
pass
# np.set_printoptions(threshold=np.nan)
# num_concept = 7
# # concepts, concept_embed, included_attrs = concept_space.rd_generate_concept()
# # print(concepts)
# # tensor = concept_space.get_images_tensor(concepts)
# # for img in concept_space.tensor2ims(tensor):
# # img.show()
# # input()
# import time
# t1 = time.time()
# data = concept_space.generate_batch(1000)
# diff = time.time() - t1
# print(diff)
# ims = concept_space.images_tensor_mean[[0, 4, 0, 4]]
# ims_scaled = []
# for im in ims:
# im -= np.min(im)
# im /= np.max(im)
# im *= 255
# ims_scaled.append(im)
# ims_scaled = np.array(ims_scaled)
# for im in concept_space.tensor2ims(ims_scaled):
# im.show()
# input()
# print(concept_space.images_tensor.shape)
# im = np.mean(concept_space.images_tensor, axis=0)
# im -= np.min(im)
# im /= np.max(im)
# im *= 255
# concept_space.arr2im(im).show()
# print(data['prev_belief'][:10])
# print(data['new_belief'][:10])
# print((1000 - np.count_nonzero(np.sum(data['new_belief'], axis=1))) / 1000)
# for im in concept_space.images_tensor_mean:
# print(np.min(im))
# im -= np.min(im)
# print(np.min(im))
# im /= np.max(im)
# im *= 255
# print(np.min(im))
# # concept_space.arr2im(im).show()
# for num_concept in range(4, 8):
# concept_space = Concept(num_concept, n_grid_per_side=3)
# count = 0
# for i in range(100000):
# concepts, _, included_attributes, _ = concept_space.rd_generate_concept()
# if concept_space.teaching_dim(concepts, included_attributes):
# count += 1
# print("mujoco with {} dis and 9 grids: {}".format(num_concept, count / 100000))
# for i in [-4, -8]:
# im_arr = concept_space.images_tensor[i]
# concept_space.arr2im(im_arr).show()
# concept_space.arr2im(arr).show()
# map = concept_space.get_att_map_arr(concepts, [1/num_concept for i in range(num_concept)])
# print(map)
# concept_space.arr2im(map).show()
# target_map = concept_space.target_att_map_arr(concepts, 1)
# concept_space.arr2im(target_map).show()
# prior = np.ones(num_concept) / num_concept
# belief1 = concept_space.bayesian_update(prior, concepts, concepts[1][0])
# belief2 = concept_space.bayesian_update(belief1, concepts, concepts[1][1])
# belief3 = concept_space.bayesian_update(belief2, concepts, concepts[1][2])
# belief4 = concept_space.bayesian_update(belief2, concepts, concepts[1][3])
# beliefs = [prior, belief1, belief2, belief3, belief4]
# for b in beliefs:
# map = concept_space.get_att_map_arr(concepts, b)
# concept_space.arr2im(map).show()
# print(concept_space.concepts_to_dscps(concepts))
| [
"os.path.exists",
"numpy.random.random",
"numpy.random.choice",
"numpy.where",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"numpy.sum",
"collections.defaultdict",
"numpy.zeros",
"numpy.concatenate",
"numpy.load",
"numpy.save"
] | [((1111, 1148), 'numpy.load', 'np.load', (['self.dataset_attributes_path'], {}), '(self.dataset_attributes_path)\n', (1118, 1148), True, 'import numpy as np\n'), ((3474, 3538), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.data_count'], {'size': 'self.num_distractors'}), '(0, self.data_count, size=self.num_distractors)\n', (3491, 3538), True, 'import numpy as np\n'), ((4244, 4261), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4255, 4261), False, 'from collections import defaultdict\n'), ((976, 1020), 'os.path.exists', 'os.path.exists', (['self.dataset_attributes_path'], {}), '(self.dataset_attributes_path)\n', (990, 1020), False, 'import os\n'), ((1528, 1553), 'numpy.load', 'np.load', (['self.images_path'], {}), '(self.images_path)\n', (1535, 1553), True, 'import numpy as np\n'), ((1898, 1953), 'os.path.join', 'os.path.join', (['load_dir', '"""Geometry_Teacher_features.npy"""'], {}), "(load_dir, 'Geometry_Teacher_features.npy')\n", (1910, 1953), False, 'import os\n'), ((1982, 2037), 'os.path.join', 'os.path.join', (['load_dir', '"""Geometry_Student_features.npy"""'], {}), "(load_dir, 'Geometry_Student_features.npy')\n", (1994, 2037), False, 'import os\n'), ((2238, 2269), 'numpy.load', 'np.load', (['self.teacher_feat_path'], {}), '(self.teacher_feat_path)\n', (2245, 2269), True, 'import numpy as np\n'), ((2297, 2328), 'numpy.load', 'np.load', (['self.student_feat_path'], {}), '(self.student_feat_path)\n', (2304, 2328), True, 'import numpy as np\n'), ((2543, 2612), 'os.path.join', 'os.path.join', (['self.save_dir', "('Geometry_%s_features.npy' % agent.role_)"], {}), "(self.save_dir, 'Geometry_%s_features.npy' % agent.role_)\n", (2555, 2612), False, 'import os\n'), ((3360, 3388), 'numpy.save', 'np.save', (['save_path', 'features'], {}), '(save_path, features)\n', (3367, 3388), True, 'import numpy as np\n'), ((5982, 6002), 'numpy.array', 'np.array', (['likelihood'], {}), '(likelihood)\n', (5990, 6002), True, 'import numpy as 
np\n'), ((6019, 6037), 'numpy.sum', 'np.sum', (['new_belief'], {}), '(new_belief)\n', (6025, 6037), True, 'import numpy as np\n'), ((6595, 6659), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.data_count'], {'size': 'self.num_distractors'}), '(0, self.data_count, size=self.num_distractors)\n', (6612, 6659), True, 'import numpy as np\n'), ((7002, 7040), 'numpy.random.random', 'np.random.random', (['self.num_distractors'], {}), '(self.num_distractors)\n', (7018, 7040), True, 'import numpy as np\n'), ((7059, 7078), 'numpy.sum', 'np.sum', (['prev_belief'], {}), '(prev_belief)\n', (7065, 7078), True, 'import numpy as np\n'), ((7087, 7135), 'numpy.random.choice', 'np.random.choice', (['(2)', '(1)'], {'p': '[1 - epsilon, epsilon]'}), '(2, 1, p=[1 - epsilon, epsilon])\n', (7103, 7135), True, 'import numpy as np\n'), ((7620, 7637), 'numpy.array', 'np.array', (['data[j]'], {}), '(data[j])\n', (7628, 7637), True, 'import numpy as np\n'), ((1427, 1459), 'os.path.exists', 'os.path.exists', (['self.images_path'], {}), '(self.images_path)\n', (1441, 1459), False, 'import os\n'), ((2840, 2889), 'numpy.zeros', 'np.zeros', (['[dim_to_append, *self.images.shape[1:]]'], {}), '([dim_to_append, *self.images.shape[1:]])\n', (2848, 2889), True, 'import numpy as np\n'), ((2048, 2086), 'os.path.exists', 'os.path.exists', (['self.teacher_feat_path'], {}), '(self.teacher_feat_path)\n', (2062, 2086), False, 'import os\n'), ((2094, 2132), 'os.path.exists', 'os.path.exists', (['self.student_feat_path'], {}), '(self.student_feat_path)\n', (2108, 2132), False, 'import os\n'), ((2906, 2952), 'numpy.concatenate', 'np.concatenate', (['[self.images, padding]'], {'axis': '(0)'}), '([self.images, padding], axis=0)\n', (2920, 2952), True, 'import numpy as np\n'), ((3693, 3708), 'numpy.where', 'np.where', (['embed'], {}), '(embed)\n', (3701, 3708), True, 'import numpy as np\n'), ((6819, 6834), 'numpy.where', 'np.where', (['embed'], {}), '(embed)\n', (6827, 6834), True, 'import numpy as 
np\n')] |
#
#
#
import numpy as np
# from src.utilities.plotting_cpp import Plot
#
# import numpy
import matplotlib.pyplot as plt
import scipy.interpolate as si
#
# points = [[0, 0], [0, 2], [2, 3], [4, 0], [6, 3], [8, 2], [8, 0]];
# points = np.array(points)
# x = points[:,0]
# y = points[:,1]
#
# t = range(len(points))
# ipl_t = np.linspace(0.0, len(points) - 1, 100)
#
# x_tup = si.splrep(t, x, k=3)
# y_tup = si.splrep(t, y, k=3)
#
# x_list = list(x_tup)
# xl = x.tolist()
# x_list[1] = xl + [0.0, 0.0, 0.0, 0.0]
#
# y_list = list(y_tup)
# yl = y.tolist()
# y_list[1] = yl + [0.0, 0.0, 0.0, 0.0]
#
# x_i = si.splev(ipl_t, x_list)
# y_i = si.splev(ipl_t, y_list)
#
# #==============================================================================
# # Plot
# #==============================================================================
#
# fig = plt.figure()
#
# ax = fig.add_subplot(231)
# plt.plot(t, x, '-og')
# plt.plot(ipl_t, x_i, 'r')
# plt.xlim([0.0, max(t)])
# plt.title('Splined x(t)')
#
# ax = fig.add_subplot(232)
# plt.plot(t, y, '-og')
# plt.plot(ipl_t, y_i, 'r')
# plt.xlim([0.0, max(t)])
# plt.title('Splined y(t)')
#
# ax = fig.add_subplot(233)
# plt.plot(x, y, '-og')
# plt.plot(x_i, y_i, 'r')
# plt.xlim([min(x) - 0.3, max(x) + 0.3])
# plt.ylim([min(y) - 0.3, max(y) + 0.3])
# plt.title('Splined f(x(t), y(t))')
#
# ax = fig.add_subplot(234)
# for i in range(7):
# vec = np.zeros(11)
# vec[i] = 1.0
# x_list = list(x_tup)
# x_list[1] = vec.tolist()
# x_i = si.splev(ipl_t, x_list)
# plt.plot(ipl_t, x_i)
# plt.xlim([0.0, max(t)])
# plt.title('Basis splines')
# plt.show()
# phi = np.linspace(0, 2. * np.pi, 40)
# r = 0.5 + np.cos(phi) # polar coords
# x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
#
#
# from scipy.interpolate import splprep, splev
# tck, u = splprep([x, y], s=0)
# new_points = splev(u, tck)
#
#
# fig, ax = plt.subplots()
# ax.plot(x, y, 'ro')
# # ax.plot(new_points[0], new_points[1], 'r-')
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from mpl_toolkits.mplot3d import Axes3D
# # 3D example
# total_rad = 10
# z_factor = 3
# noise = 0.1
#
# num_true_pts = 200
# s_true = np.linspace(0, total_rad, num_true_pts)
# x_true = np.cos(s_true)
# y_true = np.sin(s_true)
# z_true = s_true/z_factor
#
# num_sample_pts = 80
# s_sample = np.linspace(0, total_rad, num_sample_pts)
# x_sample = np.cos(s_sample) + noise * np.random.randn(num_sample_pts)
# y_sample = np.sin(s_sample) + noise * np.random.randn(num_sample_pts)
# z_sample = s_sample/z_factor + noise * np.random.randn(num_sample_pts)
#
# from scipy.interpolate import splprep, splev
#
# tck, u = splprep([x_sample,y_sample,z_sample], s=2)
# x_knots, y_knots, z_knots = interpolate.splev(tck[0], tck)
# u_fine = np.linspace(0,1,num_true_pts)
# x_fine, y_fine, z_fine = interpolate.splev(u_fine, tck)
#
# fig2 = plt.figure(2)
# ax3d = fig2.add_subplot(111, projection='3d')
# ax3d.plot(x_true, y_true, z_true, 'b')
# ax3d.plot(x_sample, y_sample, z_sample, 'r*')
# ax3d.plot(x_knots, y_knots, z_knots, 'go')
# ax3d.plot(x_fine, y_fine, z_fine, 'g')
# fig2.show()
# plt.show()
def draw_from_ellipsoid(covmat, cent, npts):
# random uniform points within ellipsoid as per: http://www.astro.gla.ac.uk/~matthew/blog/?p=368
ndims = covmat.shape[0]
# calculate eigenvalues (e) and eigenvectors (v)
eigenValues, eigenVectors = np.linalg.eig(covmat)
idx = (-eigenValues).argsort()[::-1][:ndims]
e = eigenValues[idx]
v = eigenVectors[:, idx]
e = np.diag(e)
# generate radii of hyperspheres
rs = np.random.uniform(0, 1, npts)
# rs = np.arange(npts)
# generate points
pt = np.random.normal(0, 1, [npts, ndims])
# pt = np.arange(npts*ndims).reshape(npts, ndims)
# get scalings for each point onto the surface of a unit hypersphere
fac = np.sum(pt ** 2, axis=1)
# calculate scaling for each point to be within the unit hypersphere
# with radii rs
fac = (rs ** (1.0 / ndims)) / np.sqrt(fac)
pnts = np.zeros((npts, ndims))
# scale points to the ellipsoid using the eigenvalues and rotate with
# the eigenvectors and add centroid
d = np.sqrt(np.diag(e))
d.shape = (ndims, 1)
print(v)
for i in range(0, npts):
# scale points to a uniform distribution within unit hypersphere
pnts[i, :] = fac[i] * pt[i, :]
print(np.multiply(pnts[i, :], np.transpose(d)))
pnts[i, :] = np.dot(np.multiply(pnts[i, :], np.transpose(d)), np.transpose(v)) + cent
return pnts
covmat = np.diag((7, 4, 4))
# pnts = draw_from_ellipsoid(covmat, 0, 10000)
pnts = np.load('/home/geesara/Desktop/searchspace/1_search_space.npy')[0]
print(pnts)
plt.scatter(pnts[:,0], pnts[:,1], pnts[:,2])
plt.show()
| [
"numpy.random.normal",
"numpy.transpose",
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.diag",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"numpy.load",
"matplotlib.pyplot.show"
] | [((4578, 4596), 'numpy.diag', 'np.diag', (['(7, 4, 4)'], {}), '((7, 4, 4))\n', (4585, 4596), True, 'import numpy as np\n'), ((4730, 4777), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pnts[:, 0]', 'pnts[:, 1]', 'pnts[:, 2]'], {}), '(pnts[:, 0], pnts[:, 1], pnts[:, 2])\n', (4741, 4777), True, 'import matplotlib.pyplot as plt\n'), ((4775, 4785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4783, 4785), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3439), 'numpy.linalg.eig', 'np.linalg.eig', (['covmat'], {}), '(covmat)\n', (3431, 3439), True, 'import numpy as np\n'), ((3551, 3561), 'numpy.diag', 'np.diag', (['e'], {}), '(e)\n', (3558, 3561), True, 'import numpy as np\n'), ((3609, 3638), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'npts'], {}), '(0, 1, npts)\n', (3626, 3638), True, 'import numpy as np\n'), ((3697, 3734), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '[npts, ndims]'], {}), '(0, 1, [npts, ndims])\n', (3713, 3734), True, 'import numpy as np\n'), ((3873, 3896), 'numpy.sum', 'np.sum', (['(pt ** 2)'], {'axis': '(1)'}), '(pt ** 2, axis=1)\n', (3879, 3896), True, 'import numpy as np\n'), ((4051, 4074), 'numpy.zeros', 'np.zeros', (['(npts, ndims)'], {}), '((npts, ndims))\n', (4059, 4074), True, 'import numpy as np\n'), ((4651, 4714), 'numpy.load', 'np.load', (['"""/home/geesara/Desktop/searchspace/1_search_space.npy"""'], {}), "('/home/geesara/Desktop/searchspace/1_search_space.npy')\n", (4658, 4714), True, 'import numpy as np\n'), ((4026, 4038), 'numpy.sqrt', 'np.sqrt', (['fac'], {}), '(fac)\n', (4033, 4038), True, 'import numpy as np\n'), ((4206, 4216), 'numpy.diag', 'np.diag', (['e'], {}), '(e)\n', (4213, 4216), True, 'import numpy as np\n'), ((4436, 4451), 'numpy.transpose', 'np.transpose', (['d'], {}), '(d)\n', (4448, 4451), True, 'import numpy as np\n'), ((4524, 4539), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (4536, 4539), True, 'import numpy as np\n'), ((4506, 4521), 'numpy.transpose', 
'np.transpose', (['d'], {}), '(d)\n', (4518, 4521), True, 'import numpy as np\n')] |
import numpy as np
import random
import path as path_lib
import sys
def mag(a):
return np.sqrt(a.dot(a))
def convert(feet):
return feet/345876.
BASE_LIFTS = 3
# Organisms will probably be treated as graphs with points representing the entry and exit points
class Resort_Map():
def __init__(self, chair_set=None, trail_set=None):
self.chair_set = chair_set # chair set is a list of arrays specifying chair end points
self.trail_set = trail_set # trail_set is a list of arrays specifying the trails
self.fitness = None
def make_path(self, chair):
x = np.linspace(chair[1,0],chair[0,0],5)
y = np.linspace(chair[1,1],chair[0,1],5)
self.trail_set.append(np.array([x,y]))
def owned_by(self, trail):
for chair in self.chair_set:
if np.sqrt(np.sum(np.square(chair[1] - trail[0]))) < convert(40) and np.sqrt(np.sum(np.square(chair[0] - trail[-1]))):
return chair
elif np.all(trail[0] == chair[0]):
return chair
return None
def trails_owned(self, chair):
out = []
for trail,i in zip(self.trail_set,range(len(self.trail_set))):
if np.sum(trail[:,0]-chair[1]) < convert(80) and np.sum(trail[:,-1] - chair[0]) < convert(80):
out.append(i)
elif np.all(np.array(trail[0] == chair[0])):
out.append(i)
return out
def make_chair(self, bottom, top):
self.chair_set.append(np.array([bottom, top]))
def rem_chair(self):
if len(self.chair_set) > BASE_LIFTS:
ind = np.random.randint(len(self.chair_set))
del self.chair_set[ind]
| [
"numpy.square",
"numpy.array",
"numpy.linspace",
"numpy.sum",
"numpy.all"
] | [((603, 643), 'numpy.linspace', 'np.linspace', (['chair[1, 0]', 'chair[0, 0]', '(5)'], {}), '(chair[1, 0], chair[0, 0], 5)\n', (614, 643), True, 'import numpy as np\n'), ((652, 692), 'numpy.linspace', 'np.linspace', (['chair[1, 1]', 'chair[0, 1]', '(5)'], {}), '(chair[1, 1], chair[0, 1], 5)\n', (663, 692), True, 'import numpy as np\n'), ((720, 736), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (728, 736), True, 'import numpy as np\n'), ((1509, 1532), 'numpy.array', 'np.array', (['[bottom, top]'], {}), '([bottom, top])\n', (1517, 1532), True, 'import numpy as np\n'), ((991, 1019), 'numpy.all', 'np.all', (['(trail[0] == chair[0])'], {}), '(trail[0] == chair[0])\n', (997, 1019), True, 'import numpy as np\n'), ((1210, 1240), 'numpy.sum', 'np.sum', (['(trail[:, 0] - chair[1])'], {}), '(trail[:, 0] - chair[1])\n', (1216, 1240), True, 'import numpy as np\n'), ((1256, 1287), 'numpy.sum', 'np.sum', (['(trail[:, -1] - chair[0])'], {}), '(trail[:, -1] - chair[0])\n', (1262, 1287), True, 'import numpy as np\n'), ((1356, 1386), 'numpy.array', 'np.array', (['(trail[0] == chair[0])'], {}), '(trail[0] == chair[0])\n', (1364, 1386), True, 'import numpy as np\n'), ((910, 941), 'numpy.square', 'np.square', (['(chair[0] - trail[-1])'], {}), '(chair[0] - trail[-1])\n', (919, 941), True, 'import numpy as np\n'), ((844, 874), 'numpy.square', 'np.square', (['(chair[1] - trail[0])'], {}), '(chair[1] - trail[0])\n', (853, 874), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 21:12:33 2019
@author: tungo
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
def process_prediction(x, dims, anchors, num_classes):
num_anchors = len(anchors)
fmap_size = x.size(2)
height, width = dims[0], dims[1]
scale_h, scale_w = height // x.size(2), width // x.size(3)
# transform x
x = x.view(x.size(0), x.size(1), -1)
x = x.transpose(1, 2).contiguous()
x = x.view(x.size(0), x.size(1)*num_anchors, -1)
# scale anchors' dims with respect to feature map
anchors = [(a[0]/scale_h, a[1]/scale_w) for a in anchors]
# calculate boxes' centers and objectness
x[:,:,0] = torch.sigmoid(x[:,:,0])
x[:,:,1] = torch.sigmoid(x[:,:,1])
x[:,:,4] = torch.sigmoid(x[:,:,4])
# print(torch.sum(x))
# Add the center offsets
grid = np.arange(fmap_size) # height = width -> pick one
a,b = np.meshgrid(grid, grid)
x_offset = torch.FloatTensor(a).view(-1,1)
y_offset = torch.FloatTensor(b).view(-1,1)
x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)
x[:,:,:2] += x_y_offset
# Calculate boxes' height and width
anchors = torch.FloatTensor(anchors)
# print(torch.sum(anchors))
anchors = anchors.repeat(fmap_size*fmap_size, 1).unsqueeze(0)
x[:,:,2:4] = torch.exp(x[:,:,2:4])*anchors
# print(torch.sum(x))
# Calculate class score
x[:,:,5:] = torch.sigmoid(x[:,:,5:])
# Scale back to normal size for bx, by, bw, bh
x[:,:,:4] *= scale_h
return x
def unique_class(classes):
return torch.unique(classes, dim=-1)
def write_results(prediction, thresh_pred, num_classes, iou_thresh=0.4):
batch_size = prediction.size(0)
write = False
conf_mask = (prediction[:,:,4] > thresh_pred).float().unsqueeze(2)
pred_mask = conf_mask * prediction
pred_box_corners = torch.clone(pred_mask)
pred_box_corners[:,:,0] = pred_mask[:,:,0] - pred_mask[:,:,2]/2
pred_box_corners[:,:,1] = pred_mask[:,:,1] - pred_mask[:,:,3]/2
pred_box_corners[:,:,2] = pred_mask[:,:,0] + pred_mask[:,:,2]/2
pred_box_corners[:,:,3] = pred_mask[:,:,1] + pred_mask[:,:,3]/2
for i in range(batch_size):
img_pred = pred_box_corners[i]
scores, classes = torch.max(img_pred[:,5:(5+num_classes)], dim=-1, keepdim=True)
img_pred = torch.cat((img_pred[:,:5], scores.float(), classes.float()), dim=1)
nonzero_idx = torch.nonzero(img_pred[:,4]).squeeze(1)
if (nonzero_idx.size(0) > 0):
img_pred_ = img_pred[nonzero_idx]
img_classes = unique_class(img_pred_[:,-1])
for cl in img_classes:
cl_mask = img_pred_ * (img_pred_[:,-1] == cl).float().unsqueeze(1)
cl_mask_nonzero = torch.nonzero(cl_mask[:,-2]).squeeze()
img_pred_class = img_pred_[cl_mask_nonzero].view(-1,7)
conf_sort_val, conf_sort_idx = torch.sort(img_pred_class[:,4], descending=True)
img_pred_class = img_pred_class[conf_sort_idx].view(-1, 7)
len_img_pred = img_pred_class.size(0)
for idx in range(len_img_pred):
try:
iou = calc_iou(img_pred_class[idx], img_pred_class[(idx+1):])
except:
break
iou_mask = (iou < iou_thresh).float().unsqueeze(1)
img_pred_class[idx+1:] *= iou_mask
nonzero_idx = torch.nonzero(img_pred_class[:,4]).squeeze()
img_pred_class = img_pred_class[nonzero_idx].view(-1,7)
batch_ind = img_pred_class.new(img_pred_class.size(0), 1).fill_(i) #Repeat the batch_id for as many detections of the class cls in the image
seq = batch_ind, img_pred_class
if not write:
output = torch.cat(seq,1)
write = True
else:
out = torch.cat(seq,1)
output = torch.cat((output,out))
return output
def calc_iou(box1, boxes2):
    """IoU between one corner-format box and a batch of corner-format boxes.

    box1 is a 1-D tensor (x1, y1, x2, y2); boxes2 has one such row per box.
    Returns a 1-D tensor of IoU values, one per row of boxes2.
    """
    # Corners of the intersection rectangle of box1 with every box in boxes2.
    inter_x1 = torch.max(box1[0], boxes2[:, 0])
    inter_y1 = torch.max(box1[1], boxes2[:, 1])
    inter_x2 = torch.min(box1[2], boxes2[:, 2])
    inter_y2 = torch.min(box1[3], boxes2[:, 3])
    # Clamp at zero so disjoint boxes contribute no overlap area.
    inter_w = torch.clamp(inter_x2 - inter_x1, min=0)
    inter_h = torch.clamp(inter_y2 - inter_y1, min=0)
    inter_area = inter_w * inter_h
    # Union = sum of both areas minus the overlap counted twice.
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    areas2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter_area / (area1 + areas2 - inter_area)
"torch.sort",
"torch.unique",
"torch.sigmoid",
"torch.max",
"torch.exp",
"torch.min",
"torch.clamp",
"torch.nonzero",
"torch.cat",
"torch.clone",
"numpy.meshgrid",
"torch.FloatTensor",
"numpy.arange"
] | [((810, 835), 'torch.sigmoid', 'torch.sigmoid', (['x[:, :, 0]'], {}), '(x[:, :, 0])\n', (823, 835), False, 'import torch\n'), ((849, 874), 'torch.sigmoid', 'torch.sigmoid', (['x[:, :, 1]'], {}), '(x[:, :, 1])\n', (862, 874), False, 'import torch\n'), ((888, 913), 'torch.sigmoid', 'torch.sigmoid', (['x[:, :, 4]'], {}), '(x[:, :, 4])\n', (901, 913), False, 'import torch\n'), ((982, 1002), 'numpy.arange', 'np.arange', (['fmap_size'], {}), '(fmap_size)\n', (991, 1002), True, 'import numpy as np\n'), ((1043, 1066), 'numpy.meshgrid', 'np.meshgrid', (['grid', 'grid'], {}), '(grid, grid)\n', (1054, 1066), True, 'import numpy as np\n'), ((1342, 1368), 'torch.FloatTensor', 'torch.FloatTensor', (['anchors'], {}), '(anchors)\n', (1359, 1368), False, 'import torch\n'), ((1587, 1613), 'torch.sigmoid', 'torch.sigmoid', (['x[:, :, 5:]'], {}), '(x[:, :, 5:])\n', (1600, 1613), False, 'import torch\n'), ((1745, 1774), 'torch.unique', 'torch.unique', (['classes'], {'dim': '(-1)'}), '(classes, dim=-1)\n', (1757, 1774), False, 'import torch\n'), ((2041, 2063), 'torch.clone', 'torch.clone', (['pred_mask'], {}), '(pred_mask)\n', (2052, 2063), False, 'import torch\n'), ((4289, 4321), 'torch.max', 'torch.max', (['box1[0]', 'boxes2[:, 0]'], {}), '(box1[0], boxes2[:, 0])\n', (4298, 4321), False, 'import torch\n'), ((4331, 4363), 'torch.max', 'torch.max', (['box1[1]', 'boxes2[:, 1]'], {}), '(box1[1], boxes2[:, 1])\n', (4340, 4363), False, 'import torch\n'), ((4373, 4405), 'torch.min', 'torch.min', (['box1[2]', 'boxes2[:, 2]'], {}), '(box1[2], boxes2[:, 2])\n', (4382, 4405), False, 'import torch\n'), ((4415, 4447), 'torch.min', 'torch.min', (['box1[3]', 'boxes2[:, 3]'], {}), '(box1[3], boxes2[:, 3])\n', (4424, 4447), False, 'import torch\n'), ((1483, 1506), 'torch.exp', 'torch.exp', (['x[:, :, 2:4]'], {}), '(x[:, :, 2:4])\n', (1492, 1506), False, 'import torch\n'), ((2452, 2515), 'torch.max', 'torch.max', (['img_pred[:, 5:5 + num_classes]'], {'dim': '(-1)', 'keepdim': '(True)'}), 
'(img_pred[:, 5:5 + num_classes], dim=-1, keepdim=True)\n', (2461, 2515), False, 'import torch\n'), ((4466, 4495), 'torch.clamp', 'torch.clamp', (['(xi2 - xi1)'], {'min': '(0)'}), '(xi2 - xi1, min=0)\n', (4477, 4495), False, 'import torch\n'), ((4496, 4525), 'torch.clamp', 'torch.clamp', (['(yi2 - yi1)'], {'min': '(0)'}), '(yi2 - yi1, min=0)\n', (4507, 4525), False, 'import torch\n'), ((1082, 1102), 'torch.FloatTensor', 'torch.FloatTensor', (['a'], {}), '(a)\n', (1099, 1102), False, 'import torch\n'), ((1129, 1149), 'torch.FloatTensor', 'torch.FloatTensor', (['b'], {}), '(b)\n', (1146, 1149), False, 'import torch\n'), ((3124, 3173), 'torch.sort', 'torch.sort', (['img_pred_class[:, 4]'], {'descending': '(True)'}), '(img_pred_class[:, 4], descending=True)\n', (3134, 3173), False, 'import torch\n'), ((2633, 2662), 'torch.nonzero', 'torch.nonzero', (['img_pred[:, 4]'], {}), '(img_pred[:, 4])\n', (2646, 2662), False, 'import torch\n'), ((4043, 4060), 'torch.cat', 'torch.cat', (['seq', '(1)'], {}), '(seq, 1)\n', (4052, 4060), False, 'import torch\n'), ((4129, 4146), 'torch.cat', 'torch.cat', (['seq', '(1)'], {}), '(seq, 1)\n', (4138, 4146), False, 'import torch\n'), ((4171, 4195), 'torch.cat', 'torch.cat', (['(output, out)'], {}), '((output, out))\n', (4180, 4195), False, 'import torch\n'), ((2975, 3004), 'torch.nonzero', 'torch.nonzero', (['cl_mask[:, -2]'], {}), '(cl_mask[:, -2])\n', (2988, 3004), False, 'import torch\n'), ((3639, 3674), 'torch.nonzero', 'torch.nonzero', (['img_pred_class[:, 4]'], {}), '(img_pred_class[:, 4])\n', (3652, 3674), False, 'import torch\n'), ((1178, 1212), 'torch.cat', 'torch.cat', (['(x_offset, y_offset)', '(1)'], {}), '((x_offset, y_offset), 1)\n', (1187, 1212), False, 'import torch\n')] |
import errno
import os
import numpy as np
from numpy.testing import assert_equal
import pytest
import nengo
from nengo.cache import (
DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache)
from nengo.utils.compat import int_types
from nengo.utils.testing import Timer
class SolverMock(object):
    """Callable stand-in for a decoder solver that counts its invocations.

    The shared ``n_calls`` dict maps each instance to the number of times
    it has been called, letting tests assert on cache hits vs. misses.
    """

    n_calls = {}

    def __init__(self, name='solver_mock'):
        self.n_calls[self] = 0
        # Mimic the module/name attributes of a real solver function.
        self.__module__ = __name__
        self.__name__ = name

    def __call__(self, A, Y, rng=np.random, E=None):
        self.n_calls[self] += 1
        # With weights E the decoder maps onto E's columns, else onto Y's.
        n_cols = Y.shape[1] if E is None else E.shape[1]
        return np.random.rand(A.shape[1], n_cols), {'info': 'v'}
def get_solver_test_args():
    """Fixed keyword arguments for invoking a (wrapped) solver mock."""
    n_points, n_targets, n_dims = 100, 10, 2
    return dict(
        activities=np.ones((n_points, n_dims)),
        targets=np.ones((n_points, n_targets)),
        rng=np.random.RandomState(42),
    )
def get_weight_solver_test_args():
    """Fixed keyword arguments for a solver call that includes weights E."""
    n_points, n_targets, n_weights, n_dims = 100, 10, 5, 2
    return dict(
        activities=np.ones((n_points, n_dims)),
        targets=np.ones((n_points, n_targets)),
        rng=np.random.RandomState(42),
        E=np.ones((n_dims, n_weights)),
    )
def test_decoder_cache(tmpdir):
    """Solver results are cached, keyed on arguments and solver identity."""
    cache_dir = str(tmpdir)
    # Basic test, that results are cached.
    cache = DecoderCache(cache_dir=cache_dir)
    solver_mock = SolverMock()
    decoders1, solver_info1 = cache.wrap_solver(solver_mock)(
        **get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 1
    decoders2, solver_info2 = cache.wrap_solver(solver_mock)(
        **get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 1  # result read from cache?
    assert_equal(decoders1, decoders2)
    assert solver_info1 == solver_info2
    # Different arguments must miss the cache and trigger a fresh solve.
    solver_args = get_solver_test_args()
    solver_args['activities'] *= 2
    decoders3, solver_info3 = cache.wrap_solver(solver_mock)(**solver_args)
    assert SolverMock.n_calls[solver_mock] == 2
    assert np.any(decoders1 != decoders3)
    # Test that the cache does not load results of another solver.
    another_solver = SolverMock('another_solver')
    cache.wrap_solver(another_solver)(**get_solver_test_args())
    assert SolverMock.n_calls[another_solver] == 1
def test_corrupted_decoder_cache(tmpdir):
    """A corrupted cache file is treated as a cache miss, not an error."""
    cache_dir = str(tmpdir)
    cache = DecoderCache(cache_dir=cache_dir)
    solver_mock = SolverMock()
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 1
    # corrupt the cache
    for path in cache.get_files():
        with open(path, 'w') as f:
            f.write('corrupted')
    # The corrupted entry cannot be loaded, so the solver must run again.
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 2
def test_decoder_cache_invalidation(tmpdir):
    """invalidate() empties the cache, so the next identical solve is a miss."""
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    # Basic test, that results are cached.
    cache = DecoderCache(cache_dir=cache_dir)
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 1
    cache.invalidate()
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 2
def test_decoder_cache_size_includes_overhead(tmpdir):
    """get_size_in_bytes() rounds sizes up to whole filesystem fragments."""
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    cache = DecoderCache(cache_dir=cache_dir)
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    fragment_size = get_fragment_size(cache_dir)
    actual_size = sum(os.stat(p).st_size for p in cache.get_files())
    # Guard against the raw file sizes happening to be fragment-aligned,
    # which would make the final assertion pass vacuously.
    assert actual_size % fragment_size != 0, (
        'Test succeeded by chance. Adjust get_solver_test_args() to produce '
        'date not aligned with the files system fragment size.')
    assert cache.get_size_in_bytes() % fragment_size == 0
def test_decoder_cache_shrinking(tmpdir):
    """shrink() evicts the entries with the oldest access times first."""
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    another_solver = SolverMock('another_solver')
    cache = DecoderCache(cache_dir=cache_dir)
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    # Ensure differing time stamps (depending on the file system the timestamp
    # resolution might be as bad as 1 day).
    for path in cache.get_files():
        timestamp = os.stat(path).st_atime
        timestamp -= 60 * 60 * 24 * 2  # 2 days
        os.utime(path, (timestamp, timestamp))
    cache.wrap_solver(another_solver)(**get_solver_test_args())
    cache_size = cache.get_size_in_bytes()
    assert cache_size > 0
    # Shrinking just below the current size must evict exactly the older entry.
    cache.shrink(cache_size - 1)
    # check that older cached result was removed
    assert SolverMock.n_calls[solver_mock] == 1
    cache.wrap_solver(another_solver)(**get_solver_test_args())
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 2
    assert SolverMock.n_calls[another_solver] == 1
def test_decoder_cache_shrink_threadsafe(monkeypatch, tmpdir):
    """Tests that shrink handles files deleted by other processes."""
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    another_solver = SolverMock('another_solver')
    cache = DecoderCache(cache_dir=cache_dir)
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    limit = cache.get_size()
    # Ensure differing time stamps (depending on the file system the timestamp
    # resolution might be as bad as 1 day).
    for filename in os.listdir(cache.cache_dir):
        path = os.path.join(cache.cache_dir, filename)
        timestamp = os.stat(path).st_atime
        timestamp -= 60 * 60 * 24 * 2  # 2 days
        os.utime(path, (timestamp, timestamp))
    cache.wrap_solver(another_solver)(**get_solver_test_args())
    cache_size = cache.get_size_in_bytes()
    assert cache_size > 0
    def raise_file_not_found(*args, **kwargs):
        raise OSError(errno.ENOENT, "File not found.")
    # Simulate a concurrent process deleting the cache files mid-shrink:
    # every filesystem inspection/removal now fails as if the file vanished.
    monkeypatch.setattr(cache, 'get_size_in_bytes', lambda: cache_size)
    monkeypatch.setattr('os.stat', raise_file_not_found)
    monkeypatch.setattr('os.remove', raise_file_not_found)
    monkeypatch.setattr('os.unlink', raise_file_not_found)
    cache.shrink(limit)  # must complete without raising
def test_decoder_cache_with_E_argument_to_solver(tmpdir):
    """Caching also works for solvers invoked with the weight matrix E."""
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    cache = DecoderCache(cache_dir=cache_dir)
    decoders1, solver_info1 = cache.wrap_solver(solver_mock)(
        **get_weight_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 1
    decoders2, solver_info2 = cache.wrap_solver(solver_mock)(
        **get_weight_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 1  # read from cache?
    assert_equal(decoders1, decoders2)
    assert solver_info1 == solver_info2
class DummyA(object):
    """Trivial object with one attribute; used as fingerprinting input."""

    def __init__(self, attr=0):
        self.attr = attr
class DummyB(object):
    """Second trivial attribute holder, distinct in type from DummyA."""

    def __init__(self, attr=0):
        self.attr = attr
def dummy_fn_a(arg):
    """No-op; exists only to give Fingerprint a function object."""
    return None
def dummy_fn_b(arg):
    """No-op distinct from dummy_fn_a, for inequality fingerprint checks."""
    return None
# Each triple is (reference, a value equal to it, a value different from it);
# the fingerprint string must match for equal values and differ otherwise.
@pytest.mark.parametrize('reference, equal, different', (
    (True, True, False),  # bool
    (False, False, True),  # bool
    (1.0, 1.0, 2.0),  # float
    (1.0 + 2.0j, 1 + 2j, 2.0 + 1j),  # complex
    (b'a', b'a', b'b'),  # bytes
    (u'a', u'a', u'b'),  # unicode string
    (np.eye(2), np.eye(2), np.array([[0, 1], [1, 0]])),  # array
    ({'a': 1, 'b': 2}, {'a': 1, 'b': 2}, {'a': 2, 'b': 1}),  # dict
    ((1, 2), (1, 2), (2, 1)),  # tuple
    ([1, 2], [1, 2], [2, 1]),  # list
    (DummyA(), DummyA(), DummyB()),  # object instance
    (DummyA(1), DummyA(1), DummyA(2)),  # object instance
    (dummy_fn_a, dummy_fn_a, dummy_fn_b),  # function
) + tuple((typ(1), typ(1), typ(2)) for typ in int_types))
def test_fingerprinting(reference, equal, different):
    """Fingerprints are equal exactly when the fingerprinted values are."""
    assert str(Fingerprint(reference)) == str(Fingerprint(equal))
    assert str(Fingerprint(reference)) != str(Fingerprint(different))
def test_fails_for_lambda_expression():
    """Fingerprinting a lambda is rejected with ValueError."""
    with pytest.raises(ValueError):
        Fingerprint(lambda x: x)
def test_cache_works(tmpdir, Simulator, seed):
    """Building a model with a DecoderCache writes cache files to disk."""
    cache_dir = str(tmpdir)
    model = nengo.Network(seed=seed)
    with model:
        nengo.Connection(nengo.Ensemble(10, 1), nengo.Ensemble(10, 1))
    # The cache directory starts empty and gains files during the build.
    assert len(os.listdir(cache_dir)) == 0
    Simulator(model, model=nengo.builder.Model(
        dt=0.001, decoder_cache=DecoderCache(cache_dir=cache_dir)))
    assert len(os.listdir(cache_dir)) == 2  # legacy.txt and *.nco
def calc_relative_timer_diff(t1, t2):
    """Relative duration difference of two timers.

    Positive when t2 took longer than t1; lies in (-1, 1) for positive
    durations.
    """
    difference = t2.duration - t1.duration
    total = t2.duration + t1.duration
    return difference / total
@pytest.mark.slow
def test_cache_performance(tmpdir, Simulator, seed):
    """A cache hit must build much faster than an uncached build."""
    cache_dir = str(tmpdir)
    model = nengo.Network(seed=seed)
    with model:
        nengo.Connection(nengo.Ensemble(2000, 10), nengo.Ensemble(2000, 10))
    # Time three builds: no cache at all, first build that fills the cache
    # (miss), and a second build that reads from it (hit).
    with Timer() as t_no_cache:
        Simulator(model, model=nengo.builder.Model(
            dt=0.001, decoder_cache=NoDecoderCache()))
    with Timer() as t_cache_miss:
        Simulator(model, model=nengo.builder.Model(
            dt=0.001, decoder_cache=DecoderCache(cache_dir=cache_dir)))
    with Timer() as t_cache_hit:
        Simulator(model, model=nengo.builder.Model(
            dt=0.001, decoder_cache=DecoderCache(cache_dir=cache_dir)))
    # A miss may cost at most ~10% overhead; a hit must save at least ~40%.
    assert calc_relative_timer_diff(t_no_cache, t_cache_miss) < 0.1
    assert calc_relative_timer_diff(t_cache_hit, t_no_cache) > 0.4
| [
"numpy.testing.assert_equal",
"numpy.random.rand",
"numpy.array",
"numpy.random.RandomState",
"nengo.cache.get_fragment_size",
"nengo.cache.Fingerprint",
"os.listdir",
"nengo.Ensemble",
"nengo.cache.DecoderCache",
"numpy.eye",
"numpy.ones",
"numpy.any",
"nengo.utils.testing.Timer",
"pytest... | [((1299, 1332), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (1311, 1332), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((1683, 1717), 'numpy.testing.assert_equal', 'assert_equal', (['decoders1', 'decoders2'], {}), '(decoders1, decoders2)\n', (1695, 1717), False, 'from numpy.testing import assert_equal\n'), ((1970, 2000), 'numpy.any', 'np.any', (['(decoders1 != decoders3)'], {}), '(decoders1 != decoders3)\n', (1976, 2000), True, 'import numpy as np\n'), ((2319, 2352), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (2331, 2352), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((2893, 2926), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (2905, 2926), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((3297, 3330), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (3309, 3330), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((3413, 3441), 'nengo.cache.get_fragment_size', 'get_fragment_size', (['cache_dir'], {}), '(cache_dir)\n', (3430, 3441), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((3926, 3959), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (3938, 3959), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((5066, 5099), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (5078, 5099), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((5334, 5361), 'os.listdir', 
'os.listdir', (['cache.cache_dir'], {}), '(cache.cache_dir)\n', (5344, 5361), False, 'import os\n'), ((6199, 6232), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (6211, 6232), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((6559, 6593), 'numpy.testing.assert_equal', 'assert_equal', (['decoders1', 'decoders2'], {}), '(decoders1, decoders2)\n', (6571, 6593), False, 'from numpy.testing import assert_equal\n'), ((8044, 8068), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (8057, 8068), False, 'import nengo\n'), ((8606, 8630), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (8619, 8630), False, 'import nengo\n'), ((832, 847), 'numpy.ones', 'np.ones', (['(M, D)'], {}), '((M, D))\n', (839, 847), True, 'import numpy as np\n'), ((868, 883), 'numpy.ones', 'np.ones', (['(M, N)'], {}), '((M, N))\n', (875, 883), True, 'import numpy as np\n'), ((900, 925), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (921, 925), True, 'import numpy as np\n'), ((1049, 1064), 'numpy.ones', 'np.ones', (['(M, D)'], {}), '((M, D))\n', (1056, 1064), True, 'import numpy as np\n'), ((1085, 1100), 'numpy.ones', 'np.ones', (['(M, N)'], {}), '((M, N))\n', (1092, 1100), True, 'import numpy as np\n'), ((1117, 1142), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (1138, 1142), True, 'import numpy as np\n'), ((1157, 1173), 'numpy.ones', 'np.ones', (['(D, N2)'], {}), '((D, N2))\n', (1164, 1173), True, 'import numpy as np\n'), ((4279, 4317), 'os.utime', 'os.utime', (['path', '(timestamp, timestamp)'], {}), '(path, (timestamp, timestamp))\n', (4287, 4317), False, 'import os\n'), ((5378, 5417), 'os.path.join', 'os.path.join', (['cache.cache_dir', 'filename'], {}), '(cache.cache_dir, filename)\n', (5390, 5417), False, 'import os\n'), ((5517, 5555), 'os.utime', 'os.utime', (['path', 
'(timestamp, timestamp)'], {}), '(path, (timestamp, timestamp))\n', (5525, 5555), False, 'import os\n'), ((7894, 7919), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7907, 7919), False, 'import pytest\n'), ((7929, 7953), 'nengo.cache.Fingerprint', 'Fingerprint', (['(lambda x: x)'], {}), '(lambda x: x)\n', (7940, 7953), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((8734, 8741), 'nengo.utils.testing.Timer', 'Timer', ([], {}), '()\n', (8739, 8741), False, 'from nengo.utils.testing import Timer\n'), ((8873, 8880), 'nengo.utils.testing.Timer', 'Timer', ([], {}), '()\n', (8878, 8880), False, 'from nengo.utils.testing import Timer\n'), ((9031, 9038), 'nengo.utils.testing.Timer', 'Timer', ([], {}), '()\n', (9036, 9038), False, 'from nengo.utils.testing import Timer\n'), ((4200, 4213), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (4207, 4213), False, 'import os\n'), ((5438, 5451), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (5445, 5451), False, 'import os\n'), ((7722, 7744), 'nengo.cache.Fingerprint', 'Fingerprint', (['reference'], {}), '(reference)\n', (7733, 7744), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((7753, 7771), 'nengo.cache.Fingerprint', 'Fingerprint', (['equal'], {}), '(equal)\n', (7764, 7771), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((7788, 7810), 'nengo.cache.Fingerprint', 'Fingerprint', (['reference'], {}), '(reference)\n', (7799, 7810), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((7819, 7841), 'nengo.cache.Fingerprint', 'Fingerprint', (['different'], {}), '(different)\n', (7830, 7841), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((8110, 8131), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (8124, 8131), False, 'import 
nengo\n'), ((8133, 8154), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (8147, 8154), False, 'import nengo\n'), ((8172, 8193), 'os.listdir', 'os.listdir', (['cache_dir'], {}), '(cache_dir)\n', (8182, 8193), False, 'import os\n'), ((8331, 8352), 'os.listdir', 'os.listdir', (['cache_dir'], {}), '(cache_dir)\n', (8341, 8352), False, 'import os\n'), ((8672, 8696), 'nengo.Ensemble', 'nengo.Ensemble', (['(2000)', '(10)'], {}), '(2000, 10)\n', (8686, 8696), False, 'import nengo\n'), ((8698, 8722), 'nengo.Ensemble', 'nengo.Ensemble', (['(2000)', '(10)'], {}), '(2000, 10)\n', (8712, 8722), False, 'import nengo\n'), ((593, 631), 'numpy.random.rand', 'np.random.rand', (['A.shape[1]', 'Y.shape[1]'], {}), '(A.shape[1], Y.shape[1])\n', (607, 631), True, 'import numpy as np\n'), ((680, 718), 'numpy.random.rand', 'np.random.rand', (['A.shape[1]', 'E.shape[1]'], {}), '(A.shape[1], E.shape[1])\n', (694, 718), True, 'import numpy as np\n'), ((3464, 3474), 'os.stat', 'os.stat', (['p'], {}), '(p)\n', (3471, 3474), False, 'import os\n'), ((7204, 7213), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7210, 7213), True, 'import numpy as np\n'), ((7215, 7224), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (7221, 7224), True, 'import numpy as np\n'), ((7226, 7252), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (7234, 7252), True, 'import numpy as np\n'), ((8280, 8313), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (8292, 8313), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((8845, 8861), 'nengo.cache.NoDecoderCache', 'NoDecoderCache', ([], {}), '()\n', (8859, 8861), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((8986, 9019), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (8998, 9019), False, 'from nengo.cache 
import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n'), ((9143, 9176), 'nengo.cache.DecoderCache', 'DecoderCache', ([], {'cache_dir': 'cache_dir'}), '(cache_dir=cache_dir)\n', (9155, 9176), False, 'from nengo.cache import DecoderCache, Fingerprint, get_fragment_size, NoDecoderCache\n')] |
from dataclasses import dataclass, replace
from typing import Type
import numpy as np
from numpy import ndarray
from ..element import Element, ElementLineP1
from .mesh import Mesh
from .mesh_quad_1 import MeshQuad1
from .mesh_simplex import MeshSimplex
@dataclass(repr=False)
class MeshLine1(MeshSimplex, Mesh):
    """A one-dimensional mesh."""
    # Vertex locations, shape (1, nvertices); the default mesh is the
    # unit interval [0, 1].
    doflocs: ndarray = np.array([[0., 1.]], dtype=np.float64)
    # Element connectivity, shape (2, nelements): endpoint indices per element.
    t: ndarray = np.array([[0], [1]], dtype=np.int64)
    # Default element type: piecewise-linear line element.
    elem: Type[Element] = ElementLineP1
    affine: bool = True
    def __mul__(self, other):
        # Tensor product of two 1D meshes yields a quadrilateral mesh;
        # otherwise defer to the other operand's multiplication.
        from .mesh_line_1 import MeshLine1
        if isinstance(other, MeshLine1):
            return MeshQuad1.init_tensor(self.p[0], other.p[0])
        return other * self
    def _uniform(self):
        """Refine uniformly: split every element at its midpoint."""
        p, t = self.doflocs, self.t
        # Append the midpoint of each element to the vertex array.
        newp = np.hstack((p, p[:, t].mean(axis=1)))
        newt = np.empty((t.shape[0], 2 * t.shape[1]),
                        dtype=t.dtype)
        # Even columns hold the left halves, odd columns the right halves;
        # new midpoint vertices start at index p.shape[1].
        newt[0, ::2] = t[0]
        newt[0, 1::2] = p.shape[1] + np.arange(t.shape[1])
        newt[1, ::2] = newt[0, 1::2]
        newt[1, 1::2] = t[1]
        # Boundaries/subdomains are invalidated by refinement.
        return replace(
            self,
            doflocs=newp,
            t=newt,
            _boundaries=None,
            _subdomains=None,
        )
    def _adaptive(self, marked):
        """Refine adaptively: bisect only the elements listed in *marked*."""
        p, t = self.doflocs, self.t
        # Indices of the new midpoint vertices, appended after existing ones.
        mid = range(len(marked)) + np.max(t) + 1
        nonmarked = np.setdiff1d(np.arange(t.shape[1]), marked)
        newp = np.hstack((p, p[:, t[:, marked]].mean(1)))
        newt = np.vstack((t[0, marked], mid))
        # Untouched elements + left halves + right halves of marked elements.
        newt = np.hstack((t[:, nonmarked],
                           newt,
                           np.vstack((mid, t[1, marked]))))
        return replace(
            self,
            doflocs=newp,
            t=newt,
        )
    def param(self):
        """Return the length of the longest element."""
        return np.max(np.abs(self.p[0, self.t[1]] - self.p[0, self.t[0]]))
    def element_finder(self, mapping=None):
        """Return a function mapping x-coordinates to element indices."""
        ix = np.argsort(self.p[0])
        # For every element, the vertex index of its rightmost endpoint.
        maxt = self.t[np.argmax(self.p[0, self.t], 0),
                      np.arange(self.t.shape[1])]
        def finder(x):
            xin = x.copy()  # bring endpoint inside for np.digitize
            xin[x == self.p[0, ix[-1]]] = self.p[0, ix[-2:]].mean()
            # Match each point's digitize bin to the element whose right
            # endpoint bounds that bin.
            elems = np.nonzero(ix[np.digitize(xin, self.p[0, ix])][:, None]
                              == maxt)[1]
            if len(elems) < len(x):
                raise ValueError("Point is outside of the mesh.")
            return elems
        return finder
    @staticmethod
    def strip_extra_coordinates(p: ndarray) -> ndarray:
        # Keep only the first column of p.
        return p[:, :1]
| [
"numpy.abs",
"numpy.digitize",
"dataclasses.dataclass",
"numpy.argmax",
"numpy.max",
"numpy.argsort",
"numpy.array",
"numpy.empty",
"numpy.vstack",
"dataclasses.replace",
"numpy.arange"
] | [((258, 279), 'dataclasses.dataclass', 'dataclass', ([], {'repr': '(False)'}), '(repr=False)\n', (267, 279), False, 'from dataclasses import dataclass, replace\n'), ((374, 414), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {'dtype': 'np.float64'}), '([[0.0, 1.0]], dtype=np.float64)\n', (382, 414), True, 'import numpy as np\n'), ((430, 466), 'numpy.array', 'np.array', (['[[0], [1]]'], {'dtype': 'np.int64'}), '([[0], [1]], dtype=np.int64)\n', (438, 466), True, 'import numpy as np\n'), ((870, 923), 'numpy.empty', 'np.empty', (['(t.shape[0], 2 * t.shape[1])'], {'dtype': 't.dtype'}), '((t.shape[0], 2 * t.shape[1]), dtype=t.dtype)\n', (878, 923), True, 'import numpy as np\n'), ((1117, 1188), 'dataclasses.replace', 'replace', (['self'], {'doflocs': 'newp', 't': 'newt', '_boundaries': 'None', '_subdomains': 'None'}), '(self, doflocs=newp, t=newt, _boundaries=None, _subdomains=None)\n', (1124, 1188), False, 'from dataclasses import dataclass, replace\n'), ((1517, 1547), 'numpy.vstack', 'np.vstack', (['(t[0, marked], mid)'], {}), '((t[0, marked], mid))\n', (1526, 1547), True, 'import numpy as np\n'), ((1698, 1733), 'dataclasses.replace', 'replace', (['self'], {'doflocs': 'newp', 't': 'newt'}), '(self, doflocs=newp, t=newt)\n', (1705, 1733), False, 'from dataclasses import dataclass, replace\n'), ((1937, 1958), 'numpy.argsort', 'np.argsort', (['self.p[0]'], {}), '(self.p[0])\n', (1947, 1958), True, 'import numpy as np\n'), ((1013, 1034), 'numpy.arange', 'np.arange', (['t.shape[1]'], {}), '(t.shape[1])\n', (1022, 1034), True, 'import numpy as np\n'), ((1413, 1434), 'numpy.arange', 'np.arange', (['t.shape[1]'], {}), '(t.shape[1])\n', (1422, 1434), True, 'import numpy as np\n'), ((1825, 1876), 'numpy.abs', 'np.abs', (['(self.p[0, self.t[1]] - self.p[0, self.t[0]])'], {}), '(self.p[0, self.t[1]] - self.p[0, self.t[0]])\n', (1831, 1876), True, 'import numpy as np\n'), ((1366, 1375), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (1372, 1375), True, 'import numpy as np\n'), 
((1649, 1679), 'numpy.vstack', 'np.vstack', (['(mid, t[1, marked])'], {}), '((mid, t[1, marked]))\n', (1658, 1679), True, 'import numpy as np\n'), ((1981, 2012), 'numpy.argmax', 'np.argmax', (['self.p[0, self.t]', '(0)'], {}), '(self.p[0, self.t], 0)\n', (1990, 2012), True, 'import numpy as np\n'), ((2036, 2062), 'numpy.arange', 'np.arange', (['self.t.shape[1]'], {}), '(self.t.shape[1])\n', (2045, 2062), True, 'import numpy as np\n'), ((2258, 2289), 'numpy.digitize', 'np.digitize', (['xin', 'self.p[0, ix]'], {}), '(xin, self.p[0, ix])\n', (2269, 2289), True, 'import numpy as np\n')] |
from __future__ import print_function
from scipy.interpolate import interp1d
import numpy as np
import math
from aeropy.airfoil_module import CST
def taper_function(eta, shape='linear', points=None):
    """Calculate chord along the span of the wing.

    - eta: non-dimensional span location(s) where the chord is evaluated.
    - shape: interpolation kind handed to interp1d ('linear', 'nearest',
      'zero', 'slinear', 'quadratic', 'cubic', where 'zero', 'slinear',
      'quadratic' and 'cubic' refer to a spline interpolation of zeroth,
      first, second or third order).
    - points: dict with 'eta' and 'chord' lists defining the control points;
      defaults to a linear taper from chord 1 at the root to 0.7 at the tip.
    """
    # Build the default per call: a mutable default argument would be a
    # single dict shared across all calls.
    if points is None:
        points = {'eta': [0, 1], 'chord': [1, .7]}
    # BUG FIX: `shape` was accepted but never forwarded, so every call was
    # silently linear; the default 'linear' matches interp1d's own default.
    function = interp1d(points['eta'], points['chord'], kind=shape)
    return function(eta)
def twist_function(eta, shape='linear', points=None):
    """Calculate twist increment along the span of the wing.

    - eta: non-dimensional span location(s) where the twist is evaluated.
    - shape: interpolation kind handed to interp1d ('linear', 'nearest',
      'zero', 'slinear', 'quadratic', 'cubic', where 'zero', 'slinear',
      'quadratic' and 'cubic' refer to a spline interpolation of zeroth,
      first, second or third order).
    - points: dict with 'eta' and 'delta_twist' lists defining the control
      points; defaults to a linear variation from 0 at the root to 0.1 at
      the tip.
    """
    # Build the default per call: a mutable default argument would be a
    # single dict shared across all calls.
    if points is None:
        points = {'eta': [0, 1], 'delta_twist': [0, .1]}
    # BUG FIX: `shape` was accepted but never forwarded, so every call was
    # silently linear; the default 'linear' matches interp1d's own default.
    function = interp1d(points['eta'], points['delta_twist'], kind=shape)
    return function(eta)
def CST_3D(Bu, Bl, span, N={'eta':[0,1], 'N1':[.5, .5], 'N2':[1., 1.], 'chord':[1., 0]},
           mesh = (100,100), chord = {'eta':[0,1], 'A':[1.], 'N1':1, 'N2':1, 'initial_chord':1.},
           sweep = {'eta':[0,1], 'A':[1.], 'N1':1, 'N2':1, 'x_LE_initial':0, 'x_LE_final':0}):
    """Generate upper and lower surface meshes of a CST-parameterized body.

    - Bu: upper shape coefficients
    - Bl: lower shape coefficients
    - mesh: list of number of points in x and y
    Returns [X, Y, Z_u, Z_l]; each array has shape `mesh`.
    """
    def S(B, psi, eta):
        """ Cross section shape function. Validated for high dimensions.
        To debug just verify if it turns all ones when B=ones"""
        def S_i(r, n, psi):
            """Single Bernstein basis term of order n, index r."""
            value = K(r,n)*(psi**r)*(1.-psi)**(n-r)
            return value
        # Bernstein polynomial binomial coefficient
        def K(r,n):
            K=math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
            return K
        Nx = len(B)-1
        Ny = len(B[0])-1
        # Double Bernstein expansion over chordwise (psi) and spanwise (eta).
        output = 0
        for i in range(Nx+1):
            for j in range(Ny+1):
                output += B[i][j]*S_i(i, Nx, psi)*S_i(j, Ny, eta)
        return output
    def C(N, psi, eta):
        """Class function psi^N1 * (1 - psi)^N2 with spanwise-varying N1, N2."""
        N1 = interp1d(N['eta'], N['N1'])
        N2 = interp1d(N['eta'], N['N2'])
        output = ((psi)**N1(eta))*((1.-psi)**N2(eta))
        return output
    psi = np.linspace(0,1,mesh[0])
    eta = np.linspace(0,1,mesh[1])
    # Non-dimensional upper/lower surface heights at every (psi, eta) node.
    zeta_u = np.zeros(mesh)
    zeta_l = np.zeros(mesh)
    for i in range(mesh[0]):
        for j in range(mesh[1]):
            zeta_u[j][i] = C(N, psi[i], eta[j])*S(Bu, psi[i], eta[j])
            zeta_l[j][i] = -C(N, psi[i], eta[j])*S(Bl, psi[i], eta[j])
    # NOTE(review): debug prints left in; consider removing for library use.
    print(eta)
    print(chord['initial_chord'])
    print(chord['A'])
    print(chord['N1'], chord['N2'])
    # Spanwise chord and leading-edge sweep distributions, themselves CST curves.
    chord_distribution = CST(eta, chord['eta'][1], chord['initial_chord'], Au=chord['A'], N1=chord['N1'], N2=chord['N2'])
    sweep_distribution = CST(eta, sweep['eta'][1], deltasz = sweep['x_LE_final']-.5*chord['initial_chord'], Au=sweep['A'], N1=sweep['N1'], N2=sweep['N2'])
    chord_distribution = chord_distribution[::-1]
    sweep_distribution = sweep_distribution
    # taper_function(eta, shape = 'linear', N)
    x = np.zeros(len(psi))
    for i in range(len(x)):
        x[i] = psi[i]*chord_distribution[i]
    print(chord_distribution)
    print(sweep_distribution)
    print(x)
    print(psi)
    y = eta
    # Dimensionalize: scale by local chord, shift by sweep, stretch by span.
    X = np.zeros(mesh)
    Y = np.zeros(mesh)
    Z_u = np.zeros(mesh)
    Z_l = np.zeros(mesh)
    for i in range(mesh[0]):
        for j in range(mesh[1]):
            X[j][i] = psi[i]*chord_distribution[j] - sweep_distribution[j] -.5*chord['initial_chord']
            Y[j][i] = span*eta[j]
            Z_u[j][i] = zeta_u[j][i]*chord_distribution[j]
            Z_l[j][i] = zeta_l[j][i]*chord_distribution[j]
    return [X,Y,Z_u,Z_l]
# Demo: build and plot a nosecone-like CST body of revolution.
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    from matplotlib import cm
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Inputs
    # One of the diameters
    initial_chord = 1.
    # Nosecone height
    span = 4.
    # Shape coefficient for cross section (if A=1, circular, otherwise it is an ellipse)
    A = 1.
    # location of the nosecone tip
    nosecone_x = 0.2
    # Class coefficient for chord distribution (Nb=.5, elliptical, Nb=1, Haack series)
    Nb = 1.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #B = [[1,1], [1.,1]]
    B = [[A], [A]]
    Na = 1.
    x = np.linspace(0,1)
    [X,Y,Z_u, Z_l] = CST_3D(B, B, mesh =(50,50), span=span,
                            N={'eta':[0,1], 'N1':[.5, .5], 'N2':[.5, .5]},
                            chord = {'eta':[0,1], 'A':[1.], 'N1':Na, 'N2':Nb, 'initial_chord':initial_chord},
                            sweep = {'eta':[0,1], 'A':[.5], 'N1':Nb, 'N2':Na, 'x_LE_final':nosecone_x})
    # Plot upper and lower surfaces as one 3D body.
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    surf_u = ax.plot_surface(X, Z_u, Y, cmap=plt.get_cmap('jet'),
                             linewidth=0, antialiased=False)
    surf_l = ax.plot_surface(X, Z_l, Y, cmap=plt.get_cmap('jet'),
                             linewidth=0, antialiased=False)
    # cset = ax.contour(X, Z_u, Y, zdir='z', offset=0, cmap=cm.coolwarm)
    # cset = ax.contour(X, Z_l, Y, zdir='z', offset=0, cmap=cm.coolwarm)
    # cset = ax.contour(X, Z_u, Y, zdir='x', offset=-.1, cmap=cm.coolwarm)
    # cset = ax.contour(X, Z_l, Y, zdir='x', offset=-.1, cmap=cm.coolwarm)
    # cset = ax.contour(X, Z_u, Y, zdir='y', offset =0.5, cmap=cm.coolwarm)
    # cset = ax.contour(X, Z_l, Y, zdir='y', offset =0.5, cmap=cm.coolwarm)
    # Customize the z axis.
    ax.set_zlim(0, 4)
    # Force an equal aspect ratio across all three axes by centering each
    # axis on its midpoint with a common half-range.
    max_range = np.array([X.max()-X.min(), Z_u.max()-Z_l.min(), Y.max()-Y.min()]).max() / 2.0
    mid_x = (X.max()+X.min()) * 0.5
    mid_y = (Y.max()+Y.min()) * 0.5
    mid_z = (Z_u.max()+Z_l.min()) * 0.5
    ax.set_xlim(mid_x - max_range, mid_x + max_range)
    ax.set_ylim(mid_z - max_range, mid_z + max_range)
    ax.set_zlim(mid_y - max_range, mid_y + max_range)
    plt.show()
| [
"aeropy.airfoil_module.CST",
"math.factorial",
"scipy.interpolate.interp1d",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.show"
] | [((645, 685), 'scipy.interpolate.interp1d', 'interp1d', (["points['eta']", "points['chord']"], {}), "(points['eta'], points['chord'])\n", (653, 685), False, 'from scipy.interpolate import interp1d\n'), ((1211, 1257), 'scipy.interpolate.interp1d', 'interp1d', (["points['eta']", "points['delta_twist']"], {}), "(points['eta'], points['delta_twist'])\n", (1219, 1257), False, 'from scipy.interpolate import interp1d\n'), ((2632, 2658), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'mesh[0]'], {}), '(0, 1, mesh[0])\n', (2643, 2658), True, 'import numpy as np\n'), ((2668, 2694), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'mesh[1]'], {}), '(0, 1, mesh[1])\n', (2679, 2694), True, 'import numpy as np\n'), ((2709, 2723), 'numpy.zeros', 'np.zeros', (['mesh'], {}), '(mesh)\n', (2717, 2723), True, 'import numpy as np\n'), ((2738, 2752), 'numpy.zeros', 'np.zeros', (['mesh'], {}), '(mesh)\n', (2746, 2752), True, 'import numpy as np\n'), ((3097, 3198), 'aeropy.airfoil_module.CST', 'CST', (['eta', "chord['eta'][1]", "chord['initial_chord']"], {'Au': "chord['A']", 'N1': "chord['N1']", 'N2': "chord['N2']"}), "(eta, chord['eta'][1], chord['initial_chord'], Au=chord['A'], N1=chord[\n 'N1'], N2=chord['N2'])\n", (3100, 3198), False, 'from aeropy.airfoil_module import CST\n'), ((3220, 3357), 'aeropy.airfoil_module.CST', 'CST', (['eta', "sweep['eta'][1]"], {'deltasz': "(sweep['x_LE_final'] - 0.5 * chord['initial_chord'])", 'Au': "sweep['A']", 'N1': "sweep['N1']", 'N2': "sweep['N2']"}), "(eta, sweep['eta'][1], deltasz=sweep['x_LE_final'] - 0.5 * chord[\n 'initial_chord'], Au=sweep['A'], N1=sweep['N1'], N2=sweep['N2'])\n", (3223, 3357), False, 'from aeropy.airfoil_module import CST\n'), ((3712, 3726), 'numpy.zeros', 'np.zeros', (['mesh'], {}), '(mesh)\n', (3720, 3726), True, 'import numpy as np\n'), ((3736, 3750), 'numpy.zeros', 'np.zeros', (['mesh'], {}), '(mesh)\n', (3744, 3750), True, 'import numpy as np\n'), ((3762, 3776), 'numpy.zeros', 'np.zeros', (['mesh'], {}), '(mesh)\n', 
(3770, 3776), True, 'import numpy as np\n'), ((3788, 3802), 'numpy.zeros', 'np.zeros', (['mesh'], {}), '(mesh)\n', (3796, 3802), True, 'import numpy as np\n'), ((4830, 4847), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (4841, 4847), True, 'import numpy as np\n'), ((5219, 5231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5229, 5231), True, 'import matplotlib.pyplot as plt\n'), ((6413, 6423), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6421, 6423), True, 'import matplotlib.pyplot as plt\n'), ((2471, 2498), 'scipy.interpolate.interp1d', 'interp1d', (["N['eta']", "N['N1']"], {}), "(N['eta'], N['N1'])\n", (2479, 2498), False, 'from scipy.interpolate import interp1d\n'), ((2513, 2540), 'scipy.interpolate.interp1d', 'interp1d', (["N['eta']", "N['N2']"], {}), "(N['eta'], N['N2'])\n", (2521, 2540), False, 'from scipy.interpolate import interp1d\n'), ((5313, 5332), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (5325, 5332), True, 'import matplotlib.pyplot as plt\n'), ((5436, 5455), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (5448, 5455), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2108), 'math.factorial', 'math.factorial', (['n'], {}), '(n)\n', (2105, 2108), False, 'import math\n'), ((2110, 2127), 'math.factorial', 'math.factorial', (['r'], {}), '(r)\n', (2124, 2127), False, 'import math\n'), ((2128, 2149), 'math.factorial', 'math.factorial', (['(n - r)'], {}), '(n - r)\n', (2142, 2149), False, 'import math\n')] |
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
# Compare 5 turbine results to SOWFA in 8 m/s, higher TI case
import copy
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import floris.tools as wfct
# Parameters
num_turbines = 3
sowfa_U0 = 8.0
sowfa_TI = 0.1 # High = 0.1, low = 0.06
# Can choose between two x layouts
# layout_x = (1000.0, 1756.0, 2512.0)
layout_x = (1000.0, 1882.0, 2764.0)
layout_y = (1000.0, 1000.0, 1000.0)
# Grab certain hi-TI five simulations from saved SOWFA data set
df_sowfa = pd.read_pickle("../sowfa_data_set/sowfa_data_set.p")
# Limit number of turbines
df_sowfa = df_sowfa[df_sowfa.num_turbines == num_turbines]
# Limit to wind speed
df_sowfa = df_sowfa[df_sowfa.sowfa_U0 == sowfa_U0]
# Limit to turbulence
df_sowfa = df_sowfa[df_sowfa.sowfa_TI == sowfa_TI]
# Limit to particular layout
df_sowfa = df_sowfa[df_sowfa.layout_x == layout_x]
df_sowfa = df_sowfa[df_sowfa.layout_y == layout_y]
# Sort by total sowfa power
df_sowfa["total_sowfa_power"] = df_sowfa.power.apply(np.sum)
df_sowfa = df_sowfa.sort_values("total_sowfa_power")
# Load the saved FLORIS interfaces
# fi_dict maps a model label -> (FLORIS interface, plot color, plot marker),
# as demonstrated by the tuple unpacking below.
fi_dict = pickle.load(open("../floris_models.p", "rb"))
# Resimulate the SOWFA cases
for floris_label in fi_dict:
    (fi, floris_color, floris_marker) = fi_dict[floris_label]
    # Create the column, then cast to object dtype so each cell can hold a
    # per-turbine power array (stored via .at below).
    df_sowfa[floris_label] = 0
    df_sowfa[floris_label] = df_sowfa[floris_label].astype(object)
    for i, row in df_sowfa.iterrows():
        # Match the layout, wind_speed and TI
        fi.reinitialize_flow_field(
            layout_array=[row.layout_x, row.layout_y],
            wind_speed=[row.floris_U0],
            turbulence_intensity=[row.floris_TI],
        )
        # Calculate wake with certain yaw
        fi.calculate_wake(yaw_angles=row.yaw)
        # Save the result (kW, rounded to 2 decimals)
        df_sowfa.at[i, floris_label] = np.round(
            np.array(fi.get_turbine_power()) / 1000.0, 2
        )
# Compare the turbine powers by case
num_cases = df_sowfa.shape[0]
num_col = np.min([4, num_cases])
num_row = int(np.ceil(num_cases / num_col))
fig, axarr = plt.subplots(num_row, num_col, figsize=(10, 5), sharex=True, sharey=True)
axarr = axarr.flatten()
for idx, (i, row) in enumerate(df_sowfa.iterrows()):
    ax = axarr[idx]
    # Plot the sowfa result
    ax.plot(row.power, "ks-", label="SOWFA")
    # Plot the FLORIS results
    for floris_label in fi_dict:
        (fi, floris_color, floris_marker) = fi_dict[floris_label]
        ax.plot(
            row[floris_label],
            color=floris_color,
            marker=floris_marker,
            label=floris_label,
        )
    # Title is the yaw setpoints; the 3-slot format string assumes
    # num_turbines == 3 (set at the top of this script).
    ax.set_title('%s-%s-%s' % tuple(str(r) for r in np.round(row.yaw, 1)))
    ax.grid(True)
    ax.set_xlabel("Turbine")
    ax.set_ylabel("Power (kW)")
axarr[0].legend()
plt.savefig("power8.png", format='png', bbox_inches='tight', dpi=150)
# Compare the change in total power
fig, ax = plt.subplots(figsize=(7, 4))
case_names = df_sowfa.yaw.apply(lambda x: "/".join(x.astype(int).astype(str)))
sowfa_total = df_sowfa.power.apply(np.sum)
ax.plot(sowfa_total, case_names, "ks-", label="SOWFA")
# Plot the FLORIS results
for floris_label in fi_dict:
    (fi, floris_color, floris_marker) = fi_dict[floris_label]
    total = df_sowfa[floris_label].apply(np.sum)
    ax.plot(
        total, case_names, color=floris_color, marker=floris_marker, label=floris_label
    )
ax.grid(True)
ax.set_xlabel("Total Power (kW)")
ax.set_ylabel("Case")
ax.legend()
fig.tight_layout()
plt.savefig("power9.png", format='png', bbox_inches='tight', dpi=150)
# Compare the change in normalized power
# Baseline rows are those whose yaw angles are all zero.
df_baseline = df_sowfa[df_sowfa.yaw.apply(lambda x: np.max(np.abs(x))) == 0.0]
fig, ax = plt.subplots(figsize=(7, 4))
case_names = df_sowfa.yaw.apply(lambda x: "/".join(x.astype(int).astype(str)))
sowfa_total = df_sowfa.power.apply(np.sum)
# Normalize
base_total = df_baseline.power.apply(np.sum).values[0]
sowfa_total = sowfa_total / base_total
ax.plot(sowfa_total, case_names, "ks-", label="SOWFA")
# Plot the FLORIS results
for floris_label in fi_dict:
    (fi, floris_color, floris_marker) = fi_dict[floris_label]
    total = df_sowfa[floris_label].apply(np.sum)
    # Normalize
    base_total = df_baseline[floris_label].apply(np.sum).values[0]
    total = total / base_total
    ax.plot(
        total, case_names, color=floris_color, marker=floris_marker, label=floris_label
    )
ax.grid(True)
ax.set_xlabel("Normalized Power")
ax.set_ylabel("Case")
ax.legend()
fig.tight_layout()
plt.savefig("power10.png", format='png', bbox_inches='tight', dpi=150)
plt.show()
# Write out SOWFA results
# Each row: 5 turbine powers (p0..p4, kW) followed by 5 yaw angles (y0..y4),
# matching the DataFrame columns below.
sowfa_results = np.array(
    [
        [1940, 843.9, 856.9, 893.1, 926.2, 0, 0, 0, 0, 0],
        [1575.3, 1247.3, 1008.4, 955.4, 887.1, 25, 0, 0, 0, 0],
        [1576.4, 1065, 1147.5, 1185.2, 1198.5, 25, 20, 15, 10, 0],
        [1577, 986.9, 1338.7, 1089.4, 999.8, 25, 25, 0, 0, 0],
        [1941.1, 918.6, 945.3, 948, 968.2, 0, 0, 0, 0, 0],
    ]
)
df_sowfa = pd.DataFrame(
    sowfa_results, columns=["p0", "p1", "p2", "p3", "p4", "y0", "y1", "y2", "y3", "y4"]
)
# # SET UP FLORIS AND MATCH TO BASE CASE
# wind_speed = 8.38
# TI = 0.09
# # Initialize the FLORIS interface fi, use default model
# fi = wfct.floris_interface.FlorisInterface("../../example_input.json")
# fi.reinitialize_flow_field(wind_speed=[wind_speed],turbulence_intensity=[TI],layout_array=(layout_x, layout_y))
# # Setup alternative with gch off
# fi_b = copy.deepcopy(fi)
# fi_b.set_gch(False)
# # Setup the previous defaul
# fi_gl = wfct.floris_interface.FlorisInterface("../../other_jsons/input_legacy.json")
# fi_gl.reinitialize_flow_field(wind_speed=[wind_speed],turbulence_intensity=[TI],layout_array=(layout_x, layout_y))
# # Compare yaw combinations
# yaw_combinations = [
# (0,0,0,0,0), (25,0,0,0,0), (25,25,0,0,0)
# ]
# yaw_names = ['%d/%d/%d/%d/%d' % yc for yc in yaw_combinations]
# # Plot individual turbine powers
# fig, axarr = plt.subplots(1,3,sharex=True,sharey=True,figsize=(12,5))
# total_sowfa = []
# total_gch_on = []
# total_gch_off = []
# total_legacy = []
# for y_idx, yc in enumerate(yaw_combinations):
# # Collect SOWFA DATA
# s_data = df_sowfa[(df_sowfa.y0==yc[0]) & (df_sowfa.y1==yc[1]) & (df_sowfa.y2==yc[2]) & (df_sowfa.y2==yc[3]) & (df_sowfa.y2==yc[4])]
# s_data = [s_data.p0.values[0], s_data.p1.values[0],s_data.p2.values[0],s_data.p3.values[0],s_data.p4.values[0]]
# total_sowfa.append(np.sum(s_data))
# # Collect GCH ON data
# fi.calculate_wake(yaw_angles=yc)
# g_data = np.array(fi.get_turbine_power())/ 1000.
# total_gch_on.append(np.sum(g_data))
# # Collect GCH OFF data
# fi_b.calculate_wake(yaw_angles=yc)
# b_data = np.array(fi_b.get_turbine_power())/ 1000.
# total_gch_off.append(np.sum(b_data))
# # Collect Legacy data
# fi_b.calculate_wake(yaw_angles=yc)
# b_data = np.array(fi_b.get_turbine_power())/ 1000.
# total_gch_off.append(np.sum(b_data))
# ax = axarr[y_idx]
# ax.set_title(yc)
# ax.plot(['T0','T1','T2','T3','T4'], s_data,'k',marker='s',label='SOWFA')
# ax.plot(['T0','T1','T2','T3','T4'], g_data,'g',marker='o',label='GCH ON')
# ax.plot(['T0','T1','T2','T3','T4'], b_data,'b',marker='*',label='GCH OFF')
# axarr[-1].legend()
# # Calculate totals and normalized totals
# total_sowfa = np.array(total_sowfa)
# nom_sowfa = total_sowfa/total_sowfa[0]
# total_gch_on = np.array(total_gch_on)
# nom_gch_on = total_gch_on/total_gch_on[0]
# total_gch_off = np.array(total_gch_off)
# nom_gch_off = total_gch_off/total_gch_off[0]
# fig, axarr = plt.subplots(1,2,sharex=True,sharey=False,figsize=(8,5))
# # Show results
# ax = axarr[0]
# ax.set_title("Total Power")
# ax.plot(yaw_names,total_sowfa,'k',marker='s',label='SOWFA',ls='None')
# ax.axhline(total_sowfa[0],color='k',ls='--')
# ax.plot(yaw_names,total_gch_on,'g',marker='o',label='GCH ON',ls='None')
# ax.axhline(total_gch_on[0],color='g',ls='--')
# ax.plot(yaw_names,total_gch_off,'b',marker='*',label='GCH OFF',ls='None')
# ax.axhline(total_gch_off[0],color='b',ls='--')
# ax.legend()
# # Normalized results
# ax = axarr[1]
# ax.set_title("Normalized Power")
# ax.plot(yaw_names,nom_sowfa,'k',marker='s',label='SOWFA',ls='None')
# ax.axhline(nom_sowfa[0],color='k',ls='--')
# ax.plot(yaw_names,nom_gch_on,'g',marker='o',label='GCH ON',ls='None')
# ax.axhline(nom_gch_on[0],color='g',ls='--')
# ax.plot(yaw_names,nom_gch_off,'b',marker='*',label='GCH OFF',ls='None')
# ax.axhline(nom_gch_off[0],color='b',ls='--')
# plt.show()
| [
"pandas.read_pickle",
"numpy.abs",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"numpy.array",
"numpy.min",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.round",
"matplotlib.pyplot.show"
] | [((1114, 1166), 'pandas.read_pickle', 'pd.read_pickle', (['"""../sowfa_data_set/sowfa_data_set.p"""'], {}), "('../sowfa_data_set/sowfa_data_set.p')\n", (1128, 1166), True, 'import pandas as pd\n'), ((2576, 2598), 'numpy.min', 'np.min', (['[4, num_cases]'], {}), '([4, num_cases])\n', (2582, 2598), True, 'import numpy as np\n'), ((2656, 2729), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_row', 'num_col'], {'figsize': '(10, 5)', 'sharex': '(True)', 'sharey': '(True)'}), '(num_row, num_col, figsize=(10, 5), sharex=True, sharey=True)\n', (2668, 2729), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3429), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""power8.png"""'], {'format': '"""png"""', 'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "('power8.png', format='png', bbox_inches='tight', dpi=150)\n", (3371, 3429), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3505), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 4)'}), '(figsize=(7, 4))\n', (3489, 3505), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4127), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""power9.png"""'], {'format': '"""png"""', 'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "('power9.png', format='png', bbox_inches='tight', dpi=150)\n", (4069, 4127), True, 'import matplotlib.pyplot as plt\n'), ((4259, 4287), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 4)'}), '(figsize=(7, 4))\n', (4271, 4287), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5133), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""power10.png"""'], {'format': '"""png"""', 'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "('power10.png', format='png', bbox_inches='tight', dpi=150)\n", (5074, 5133), True, 'import matplotlib.pyplot as plt\n'), ((5134, 5144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5142, 5144), True, 'import matplotlib.pyplot as plt\n'), ((5189, 5485), 'numpy.array', 'np.array', (['[[1940, 843.9, 856.9, 893.1, 926.2, 0, 0, 0, 
0, 0], [1575.3, 1247.3, 1008.4,\n 955.4, 887.1, 25, 0, 0, 0, 0], [1576.4, 1065, 1147.5, 1185.2, 1198.5, \n 25, 20, 15, 10, 0], [1577, 986.9, 1338.7, 1089.4, 999.8, 25, 25, 0, 0, \n 0], [1941.1, 918.6, 945.3, 948, 968.2, 0, 0, 0, 0, 0]]'], {}), '([[1940, 843.9, 856.9, 893.1, 926.2, 0, 0, 0, 0, 0], [1575.3, \n 1247.3, 1008.4, 955.4, 887.1, 25, 0, 0, 0, 0], [1576.4, 1065, 1147.5, \n 1185.2, 1198.5, 25, 20, 15, 10, 0], [1577, 986.9, 1338.7, 1089.4, 999.8,\n 25, 25, 0, 0, 0], [1941.1, 918.6, 945.3, 948, 968.2, 0, 0, 0, 0, 0]])\n', (5197, 5485), True, 'import numpy as np\n'), ((5536, 5637), 'pandas.DataFrame', 'pd.DataFrame', (['sowfa_results'], {'columns': "['p0', 'p1', 'p2', 'p3', 'p4', 'y0', 'y1', 'y2', 'y3', 'y4']"}), "(sowfa_results, columns=['p0', 'p1', 'p2', 'p3', 'p4', 'y0',\n 'y1', 'y2', 'y3', 'y4'])\n", (5548, 5637), True, 'import pandas as pd\n'), ((2613, 2641), 'numpy.ceil', 'np.ceil', (['(num_cases / num_col)'], {}), '(num_cases / num_col)\n', (2620, 2641), True, 'import numpy as np\n'), ((4229, 4238), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (4235, 4238), True, 'import numpy as np\n'), ((3240, 3260), 'numpy.round', 'np.round', (['row.yaw', '(1)'], {}), '(row.yaw, 1)\n', (3248, 3260), True, 'import numpy as np\n')] |
from typing import Optional, Tuple, Union
import numpy as np
from numpy import array
import gdsfactory as gf
from gdsfactory.components.text import text
from gdsfactory.types import Anchor, Layer
Coordinate = Union[Tuple[float, float], array]
@gf.cell_without_validator
def die_bbox_frame(
    bbox: Tuple[Coordinate, Coordinate] = ((-1.0, -1.0), (3.0, 4.0)),
    street_width: float = 100.0,
    street_length: float = 1000.0,
    die_name: Optional[str] = None,
    text_size: float = 100.0,
    text_anchor: Anchor = "sw",
    layer: Layer = (49, 0),
    padding: float = 10.0,
) -> gf.Component:
    """Return a dicing-lane frame drawn around *bbox*, optionally labelled.

    Builds four L-shaped street polygons (one per corner, mirrored from a
    single template) around the bounding box and, when *die_name* is given,
    places a text label at the requested anchor just inside the frame.
    Similar to die and bbox; adapted from phidl.geometry.

    Args:
        bbox: ((xmin, ymin), (xmax, ymax)) region to frame, e.g. Component.bbox
        street_width: width of the boundary-box streets.
        street_length: length of the boundary box.
        die_name: label text.
        text_size: label text size.
        text_anchor: {'nw', 'nc', 'ne', 'sw', 'sc', 'se'} label location.
        layer: layer(s) for the polygons and label.
        padding: extra clearance added around the bbox.
    """
    component = gf.Component()
    (xmin, ymin), (xmax, ymax) = bbox
    # Centre of the framed region; geometry is built around the origin and
    # shifted there at the end.
    cx = (xmax + xmin) / 2
    cy = (ymax + ymin) / 2
    # Half-extents, grown by the street width plus padding.
    half_w = (xmax - xmin) / 2 + street_width + padding
    half_h = (ymax - ymin) / 2 + street_width + padding
    # NOTE(review): the street_length argument is overridden here, exactly as
    # in the original implementation — confirm whether that is intended.
    street_length = max([half_w, half_h])
    corner_x = np.array(
        [
            half_w,
            half_w,
            half_w - street_width,
            half_w - street_width,
            half_w - street_length,
            half_w - street_length,
        ]
    )
    corner_y = np.array(
        [
            half_h,
            half_h - street_length,
            half_h - street_length,
            half_h - street_width,
            half_h - street_width,
            half_h,
        ]
    )
    # One corner polygon, mirrored into all four quadrants.
    for sign_x, sign_y in ((1, 1), (-1, 1), (1, -1), (-1, -1)):
        component.add_polygon([sign_x * corner_x, sign_y * corner_y], layer=layer)
    if die_name:
        label = component.add_ref(text(text=die_name, size=text_size, layer=layer))
        margin = street_width + 20
        # Each anchor pins two edges/centres of the label just inside the frame.
        placements = {
            "nw": (("xmin", -half_w + margin), ("ymax", half_h - margin)),
            "nc": (("x", 0), ("ymax", half_h - margin)),
            "ne": (("xmax", half_w - margin), ("ymax", half_h - margin)),
            "sw": (("xmin", -half_w + margin), ("ymin", -half_h + margin)),
            "sc": (("x", 0), ("ymin", -half_h + margin)),
            "se": (("xmax", half_w - margin), ("ymin", -half_h + margin)),
        }
        for attr, value in placements.get(text_anchor, ()):
            setattr(label, attr, value)
    return component.move((cx, cy)).flatten()
if __name__ == "__main__":
    # Demo: frame a 10x15 component array and label the die.
    demo = gf.Component("demo")
    block = demo << gf.components.array(rows=15, columns=10)
    demo << die_bbox_frame(block.bbox, die_name="chip99")
    demo.show()
| [
"gdsfactory.components.text.text",
"gdsfactory.Component",
"numpy.array",
"gdsfactory.components.array"
] | [((1230, 1244), 'gdsfactory.Component', 'gf.Component', ([], {}), '()\n', (1242, 1244), True, 'import gdsfactory as gf\n'), ((1525, 1625), 'numpy.array', 'np.array', (['[sx, sx, sx - street_width, sx - street_width, sx - street_length, sx -\n street_length]'], {}), '([sx, sx, sx - street_width, sx - street_width, sx - street_length,\n sx - street_length])\n', (1533, 1625), True, 'import numpy as np\n'), ((1730, 1830), 'numpy.array', 'np.array', (['[sy, sy - street_length, sy - street_length, sy - street_width, sy -\n street_width, sy]'], {}), '([sy, sy - street_length, sy - street_length, sy - street_width, sy -\n street_width, sy])\n', (1738, 1830), True, 'import numpy as np\n'), ((2772, 2792), 'gdsfactory.Component', 'gf.Component', (['"""demo"""'], {}), "('demo')\n", (2784, 2792), True, 'import gdsfactory as gf\n'), ((2809, 2849), 'gdsfactory.components.array', 'gf.components.array', ([], {'rows': '(15)', 'columns': '(10)'}), '(rows=15, columns=10)\n', (2828, 2849), True, 'import gdsfactory as gf\n'), ((2152, 2200), 'gdsfactory.components.text.text', 'text', ([], {'text': 'die_name', 'size': 'text_size', 'layer': 'layer'}), '(text=die_name, size=text_size, layer=layer)\n', (2156, 2200), False, 'from gdsfactory.components.text import text\n')] |
import tensorflow as tf
import numpy as np
import random
import matplotlib.pyplot as plt
from zipfile import ZipFile
# Fix all RNG seeds for reproducible shuffling/augmentation/initialisation.
random.seed(1337)
np.random.seed(1337)
tf.random.set_seed(1337)
# Extract the dataset archive into the working directory.
# NOTE(review): the context variable shadows the builtin `zip`.
with ZipFile("archive.zip","r") as zip:
    zip.extractall()
BATCH_SIZE = 32
IMG_SIZE = (160,160)
# Assumes the extracted archive contains "train" and "val" directories of
# class-labelled images — TODO confirm archive layout.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "train",
    shuffle = True,
    image_size = IMG_SIZE,
    batch_size = BATCH_SIZE,
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "val",
    shuffle = True,
    image_size = IMG_SIZE,
    batch_size = BATCH_SIZE,
)
# Carve one fifth of the validation batches off as a held-out test set.
val_batches = tf.data.experimental.cardinality(val_ds)
test_ds = val_ds.take(val_batches//5)
val_ds = val_ds.skip(val_batches//5)
class_names = train_ds.class_names
# Preview a 3x3 grid of training images with their class labels.
plt.figure(figsize=(10,10))
for images,labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3,3,i+1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
plt.show()
# Augmentation: random horizontal flips only.
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
])
plt.figure(figsize=(10,10))
for images,_ in train_ds.take(2):
    for i in range(9):
        ax = plt.subplot(3,3,i+1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.axis("off")
plt.show()
# Overlap data loading with training.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
IMG_SHAPE = (160,160,3)
# ImageNet-pretrained MobileNetV2 backbone, frozen for transfer learning.
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,include_top=False,weights="imagenet")
image_batch,label_batch = next(iter(train_ds))
feature_batch = base_model(image_batch)
base_model.trainable = False
base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
# Two-class classification head (no activation: outputs are logits).
prediction_layer = tf.keras.layers.Dense(2)
prediction_batch = prediction_layer(feature_batch_average)
def get_model():
    """Assemble the full model: augmentation -> preprocess -> frozen backbone -> head."""
    inputs = tf.keras.Input(shape=(160,160,3))
    x = data_augmentation(inputs)
    x = preprocess_input(x)
    # training=False keeps the backbone's batch-norm layers in inference mode.
    x = base_model(x,training=False)
    x = global_average_layer(x)
    x = tf.keras.layers.Dropout(0.2,seed=1337)(x)
    outputs = prediction_layer(x)
    model = tf.keras.Model(inputs,outputs)
    return model
model = get_model()
model.summary()
# from_logits=True matches the activation-free Dense head above.
model.compile(tf.keras.optimizers.Adam(),tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),metrics=["accuracy"])
if __name__=="__main__":
    initial_epochs = 1
    # Pre-training baseline metrics on the validation set.
    loss0,accuracy0 = model.evaluate(val_ds)
    print("Initial loss: {:.2f} %".format(100*loss0))
    print("Initial accuracy: {:.2f} %".format(100*accuracy0))
    # Keep only the checkpoint with the best validation accuracy.
    checkpoint = tf.keras.callbacks.ModelCheckpoint("airbus.h5",save_weights_only=False,monitor="val_accuracy",save_best_only=True)
    model.fit(train_ds,epochs=initial_epochs,validation_data=val_ds,callbacks=[checkpoint])
    # Reload the best checkpoint and evaluate on the held-out test set.
    best = tf.keras.models.load_model("airbus.h5")
    loss,accuracy = best.evaluate(test_ds)
    print("\nTest accuracy: {:.2f} %".format(100*accuracy))
    print("Test loss: {:.2f} %".format(100*loss))
| [
"tensorflow.data.experimental.cardinality",
"zipfile.ZipFile",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.preprocessing.image_dataset_from_directory",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"tenso... | [((118, 135), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (129, 135), False, 'import random\n'), ((136, 156), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (150, 156), True, 'import numpy as np\n'), ((157, 181), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(1337)'], {}), '(1337)\n', (175, 181), True, 'import tensorflow as tf\n'), ((292, 414), 'tensorflow.keras.preprocessing.image_dataset_from_directory', 'tf.keras.preprocessing.image_dataset_from_directory', (['"""train"""'], {'shuffle': '(True)', 'image_size': 'IMG_SIZE', 'batch_size': 'BATCH_SIZE'}), "('train', shuffle=True,\n image_size=IMG_SIZE, batch_size=BATCH_SIZE)\n", (343, 414), True, 'import tensorflow as tf\n'), ((434, 554), 'tensorflow.keras.preprocessing.image_dataset_from_directory', 'tf.keras.preprocessing.image_dataset_from_directory', (['"""val"""'], {'shuffle': '(True)', 'image_size': 'IMG_SIZE', 'batch_size': 'BATCH_SIZE'}), "('val', shuffle=True,\n image_size=IMG_SIZE, batch_size=BATCH_SIZE)\n", (485, 554), True, 'import tensorflow as tf\n'), ((579, 619), 'tensorflow.data.experimental.cardinality', 'tf.data.experimental.cardinality', (['val_ds'], {}), '(val_ds)\n', (611, 619), True, 'import tensorflow as tf\n'), ((732, 760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (742, 760), True, 'import matplotlib.pyplot as plt\n'), ((949, 959), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (957, 959), True, 'import matplotlib.pyplot as plt\n'), ((1077, 1105), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1087, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1274, 1284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1282, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1571, 1670), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': 'IMG_SHAPE', 'include_top': 
'(False)', 'weights': '"""imagenet"""'}), "(input_shape=IMG_SHAPE, include_top=False,\n weights='imagenet')\n", (1604, 1670), True, 'import tensorflow as tf\n'), ((1827, 1867), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (1865, 1867), True, 'import tensorflow as tf\n'), ((1948, 1972), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {}), '(2)\n', (1969, 1972), True, 'import tensorflow as tf\n'), ((188, 215), 'zipfile.ZipFile', 'ZipFile', (['"""archive.zip"""', '"""r"""'], {}), "('archive.zip', 'r')\n", (195, 215), False, 'from zipfile import ZipFile\n'), ((2060, 2095), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(160, 160, 3)'}), '(shape=(160, 160, 3))\n', (2074, 2095), True, 'import tensorflow as tf\n'), ((2300, 2331), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2314, 2331), True, 'import tensorflow as tf\n'), ((2396, 2422), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (2420, 2422), True, 'import tensorflow as tf\n'), ((2423, 2486), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2468, 2486), True, 'import tensorflow as tf\n'), ((2721, 2842), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['"""airbus.h5"""'], {'save_weights_only': '(False)', 'monitor': '"""val_accuracy"""', 'save_best_only': '(True)'}), "('airbus.h5', save_weights_only=False,\n monitor='val_accuracy', save_best_only=True)\n", (2755, 2842), True, 'import tensorflow as tf\n'), ((2933, 2972), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""airbus.h5"""'], {}), "('airbus.h5')\n", (2959, 2972), True, 'import tensorflow as tf\n'), ((826, 850), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(i + 1)'], {}), '(3, 3, i + 1)\n', 
(837, 850), True, 'import matplotlib.pyplot as plt\n'), ((897, 930), 'matplotlib.pyplot.title', 'plt.title', (['class_names[labels[i]]'], {}), '(class_names[labels[i]])\n', (906, 930), True, 'import matplotlib.pyplot as plt\n'), ((933, 948), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (941, 948), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1071), 'tensorflow.keras.layers.experimental.preprocessing.RandomFlip', 'tf.keras.layers.experimental.preprocessing.RandomFlip', (['"""horizontal"""'], {}), "('horizontal')\n", (1057, 1071), True, 'import tensorflow as tf\n'), ((1175, 1199), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(i + 1)'], {}), '(3, 3, i + 1)\n', (1186, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1273), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1266, 1273), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2257), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {'seed': '(1337)'}), '(0.2, seed=1337)\n', (2241, 2257), True, 'import tensorflow as tf\n')] |
"""
Classes to play sounds and tones on pygame
class SoundPlayer : manage a FIFO queue to play sounds from ogg files in a dedicated channel.
- load(name, filename): method that loads an ogg file 'filename' and associates the name 'name' to that sound
- play(name=None): if name not None, enqueue the corresponding sound in the FIFO. If the channel is not busy, play the next sound from the FIFO
class Tone : to play a sinusoidal wave in a dedicated channel. Methods 'on' and 'off' to play or stop the tone.
"""
import pygame
from time import sleep
import logging
import sys
import numpy as np
log = logging.getLogger("PygameAudio")
class PygameAudio:
    """Base class for pygame audio helpers.

    Initialises the shared pygame mixer once and hands each instance its own
    dedicated mixer channel.
    """

    _init = False          # True once pygame.mixer has been initialised
    _channels_used = 0     # next free mixer channel id

    def __init__(self, sampleRate=22050, debug=False):
        """Set up logging (optional), the shared mixer, and this instance's channel.

        sampleRate: mixer sample rate in Hz (only used on first initialisation).
        debug: when True, route INFO-level log records to stdout.
        """
        if debug:
            log.setLevel(logging.INFO)
            handler = logging.StreamHandler(sys.stdout)
            handler.setLevel(logging.INFO)
            formatter = logging.Formatter(
                fmt='%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',
                datefmt="%H:%M:%S",
            )
            handler.setFormatter(formatter)
            log.addHandler(handler)
        print("_channels_used", self._channels_used)
        self.sampleRate = sampleRate
        # Initialise the mixer only once, no matter how many instances exist.
        if not PygameAudio._init:
            log.info(f"mixer init - sampleRate: {sampleRate}")
            pygame.mixer.init(sampleRate, -16, 1, 128)
            PygameAudio._init = True
        # Claim the next free channel for this instance.
        self._channel_id = PygameAudio._channels_used
        log.info(f"init channel {self._channel_id}")
        self._channel = pygame.mixer.Channel(self._channel_id)
        PygameAudio._channels_used += 1
class SoundPlayer(PygameAudio):
    """FIFO sound player: queued sounds play back-to-back on one channel."""

    def __init__(self, debug=False):
        super().__init__(debug=debug)
        self._raw_sounds = {}    # name -> loaded pygame Sound
        self._fifo_sounds = []   # (name, sound) pairs awaiting playback
        self._debug = debug

    def load(self, name, filename):
        """Load the ogg file `filename` and register it under `name`."""
        log.info(f"loading {name} (unknown)")
        self._raw_sounds[name] = pygame.mixer.Sound(filename)

    def play(self, name=None):
        """Queue sound `name` (if given), then start the next queued sound
        whenever the channel is idle."""
        if name is not None:
            self._fifo_sounds.append((name, self._raw_sounds[name]))
            log.info(f"queuing '{name}' (remaining: {len(self._fifo_sounds)}) ")
        # Dequeue only when something is waiting and the channel is free.
        if self._fifo_sounds and not self._channel.get_busy():
            name, sound = self._fifo_sounds.pop(0)
            log.info(f"playing '{name}' on channel {self._channel_id} (remaining: {len(self._fifo_sounds)})")
            self._channel.queue(sound)
class Tone(PygameAudio):
    """Play a continuous sine tone on a dedicated channel.

    Bug fix: the original implementation hard-coded 440 Hz in the waveform
    synthesis, silently ignoring the `freq` argument. The wave is now built
    from `self.freq` (default 440, so default behavior is unchanged).
    """

    def __init__(self, freq=440, debug=False):
        """
        freq: tone frequency in Hz.
        debug: forwarded to PygameAudio to enable INFO logging.
        """
        super().__init__(debug=debug)
        self.freq = freq
        # One second of a 16-bit sine wave at `freq` Hz; looped on playback.
        arr = np.array([4096 * np.sin(2.0 * np.pi * self.freq * x / self.sampleRate) for x in range(0, self.sampleRate)]).astype(np.int16)
        self.sound = pygame.sndarray.make_sound(arr)

    def on(self):
        """Start the tone (loops indefinitely until off())."""
        log.info(f"play tone {self.freq}Hz on channel {self._channel_id}")
        self._channel.play(self.sound, -1)

    def off(self):
        """Stop the tone."""
        log.info(f"stop tone {self.freq}Hz on channel {self._channel_id}")
        self._channel.stop()
if __name__ == '__main__':
    import random
    # Manual demo: queue several sounds and toggle a tone at random intervals.
    sp = SoundPlayer(debug=True)
    t = Tone()
    sp.load("hello", "../assets/sounds/hello.ogg")
    sp.load("bonjour", "../assets/sounds/bonjour.ogg")
    # Queue four plays of the same sound; they drain one per play() call
    # while the channel is idle.
    sp.play("hello")
    sp.play("hello")
    sp.play("hello")
    sp.play("hello")
    prev_on = 0
    for i in range(10):
        print(i)
        # Randomly decide whether the tone should be on for this tick.
        on = random.randint(0, 1)
        if on != prev_on:
            if on:
                t.on()
            else:
                t.off()
            prev_on = on
        if i == 2:
            sp.play("bonjour")
            sp.play("bonjour")
        # Pump the FIFO: starts the next queued sound if the channel is idle.
        sp.play()
        sleep(2)
    if prev_on: t.off()
| [
"logging.getLogger",
"logging.StreamHandler",
"pygame.mixer.Channel",
"logging.Formatter",
"pygame.mixer.Sound",
"time.sleep",
"pygame.sndarray.make_sound",
"numpy.sin",
"pygame.mixer.init",
"random.randint"
] | [((663, 695), 'logging.getLogger', 'logging.getLogger', (['"""PygameAudio"""'], {}), "('PygameAudio')\n", (680, 695), False, 'import logging\n'), ((1430, 1468), 'pygame.mixer.Channel', 'pygame.mixer.Channel', (['self._channel_id'], {}), '(self._channel_id)\n', (1450, 1468), False, 'import pygame\n'), ((1776, 1804), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['filename'], {}), '(filename)\n', (1794, 1804), False, 'import pygame\n'), ((2498, 2529), 'pygame.sndarray.make_sound', 'pygame.sndarray.make_sound', (['arr'], {}), '(arr)\n', (2524, 2529), False, 'import pygame\n'), ((3073, 3093), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3087, 3093), False, 'import random\n'), ((3244, 3252), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (3249, 3252), False, 'from time import sleep\n'), ((855, 888), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (876, 888), False, 'import logging\n'), ((1246, 1288), 'pygame.mixer.init', 'pygame.mixer.init', (['sampleRate', '(-16)', '(1)', '(128)'], {}), '(sampleRate, -16, 1, 128)\n', (1263, 1288), False, 'import pygame\n'), ((937, 1055), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s"""', 'datefmt': '"""%H:%M:%S"""'}), "(fmt=\n '%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',\n datefmt='%H:%M:%S')\n", (954, 1055), False, 'import logging\n'), ((2381, 2428), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * 440 * x / self.sampleRate)'], {}), '(2.0 * np.pi * 440 * x / self.sampleRate)\n', (2387, 2428), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, <NAME>
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import numpy as np
from visvis.wobjects.polygonalModeling import BaseMesh
def combineMeshes(meshes):
    """ combineMeshes(meshes)

    Given a list of mesh objects, produces a combined mesh.

    Raises ValueError if the list is empty, if the meshes disagree on
    verticesPerFace, or if some (but not all) meshes carry face data.
    Normals and per-vertex values are kept only when every mesh provides
    them.
    """
    if not meshes:
        raise ValueError('No meshes or empty meshes given')

    # Check mesh similarity
    vpf = 0
    hasNormals = True
    hasFaces = True
    hasValues = True
    #
    for mesh in meshes:
        if vpf == 0:
            # First mesh: record its properties as the reference
            # (assumes verticesPerFace is nonzero for a real mesh).
            hasFaces = (mesh._faces is not None)
            vpf = mesh._verticesPerFace
        else:
            # Subsequent meshes must match the first
            if mesh._verticesPerFace != vpf:
                raise ValueError('Cannot combine meshes with different verticesPerFace.')
            if (mesh._faces is not None) != hasFaces:
                raise ValueError('Cannot combine meshes with and without face data.')
        # Checked for every mesh (the original wrapped this in a pointless
        # `if True:` block, removed here): keep normals/values only if all
        # meshes have them.
        hasNormals = hasNormals and (mesh._normals is not None)
        hasValues = hasValues and (mesh._values is not None)

    # Combine vertices
    vertices = np.concatenate([m._vertices for m in meshes])

    # Combine faces, offsetting each mesh's indices by the number of
    # vertices emitted before it
    faces = None
    if hasFaces:
        facesList = []
        startIndex = 0
        for mesh in meshes:
            facesList.append(mesh._faces + startIndex)
            startIndex += mesh._vertices.shape[0]
        faces = np.concatenate(facesList)

    # Combine normals
    normals = None
    if hasNormals:
        normals = np.concatenate([m._normals for m in meshes])

    # Combine values
    values = None
    if hasValues:
        values = np.concatenate([m._values for m in meshes])

    # Done
    return BaseMesh(vertices, faces, normals, values, vpf)
| [
"visvis.wobjects.polygonalModeling.BaseMesh",
"numpy.concatenate"
] | [((1288, 1333), 'numpy.concatenate', 'np.concatenate', (['[m._vertices for m in meshes]'], {}), '([m._vertices for m in meshes])\n', (1302, 1333), True, 'import numpy as np\n'), ((1914, 1961), 'visvis.wobjects.polygonalModeling.BaseMesh', 'BaseMesh', (['vertices', 'faces', 'normals', 'values', 'vpf'], {}), '(vertices, faces, normals, values, vpf)\n', (1922, 1961), False, 'from visvis.wobjects.polygonalModeling import BaseMesh\n'), ((1604, 1629), 'numpy.concatenate', 'np.concatenate', (['facesList'], {}), '(facesList)\n', (1618, 1629), True, 'import numpy as np\n'), ((1715, 1759), 'numpy.concatenate', 'np.concatenate', (['[m._normals for m in meshes]'], {}), '([m._normals for m in meshes])\n', (1729, 1759), True, 'import numpy as np\n'), ((1841, 1884), 'numpy.concatenate', 'np.concatenate', (['[m._values for m in meshes]'], {}), '([m._values for m in meshes])\n', (1855, 1884), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
from ftl.experiment import run_exp
import argparse
import os
import numpy as np
import json
from numpyencoder import NumpyEncoder
import pickle
def _parse_args():
parser = argparse.ArgumentParser(description='driver.py')
# Client Opt Params
parser.add_argument('--server_config', type=str, default='./configs/server_config.json')
parser.add_argument('--client_config', type=str, default='./configs/client_config.json')
# Results Related Params
parser.add_argument('--o', type=str, default='result_default', help='Pass results location')
parser.add_argument('--n_repeat', type=int, default=1, help='Specify number of repeat runs')
args = parser.parse_args()
return args
def run_main():
    """Entry point: parse CLI args, load the server/client JSON configs and
    run the federated experiment once per random seed (1..n_repeat),
    pickling the results dict after each run.
    """
    args = _parse_args()
    print(args)
    # NOTE(review): these open() handles are never closed; consider
    # `with open(...) as f: json.load(f)`.
    client_config = json.load(open(args.client_config))
    server_config = json.load(open(args.server_config))
    print('# ------------------------------------------------- #')
    print('#           Config                                  #')
    print('# ------------------------------------------------- #')
    print('Server:\n{}'.format(json.dumps(server_config, indent=4)), flush=True)
    print('Client:\n{}'.format(json.dumps(client_config, indent=4)), flush=True)
    # Results are grouped on disk by data set and network architecture.
    directory = "result_dumps/" + client_config["data_config"]["data_set"] + "/" + \
                client_config["learner_config"]["net"] + "/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    results = {}
    for random_seed in np.arange(1, args.n_repeat + 1):
        # Each repeat uses a fresh data seed; both configs are stored in the
        # results dict for reproducibility.
        client_config["data_config"]["seed"] = random_seed
        results["client_config"] = client_config
        results["server_config"] = server_config
        loss, val_acc, test_acc, sv, alpha, best_val, best_test, lowest_loss, grad_kl_div = \
            run_exp(client_config=client_config, server_config=server_config)
        results["loss"] = loss
        results["val_acc"] = val_acc
        results["test_acc"] = test_acc
        results["sv"] = sv
        results["sv_wt"] = alpha
        results["best_val_acc"] = best_val
        results["best_test_acc"] = best_test
        results["lowest_epoch_loss"] = lowest_loss
        results["grad_kl_div"] = grad_kl_div
        print(results)
        # NOTE(review): the same output file is rewritten on every loop
        # iteration, so with n_repeat > 1 only the last seed's results
        # survive — confirm whether per-seed filenames were intended.
        with open(directory + args.o, 'wb') as f:
            pickle.dump(results, f)
            # json.dump(results, f, indent=4, ensure_ascii=False, cls=NumpyEncoder)
# Script entry point: only run the experiment when executed directly.
if __name__ == '__main__':
    run_main()
| [
"os.path.exists",
"pickle.dump",
"os.makedirs",
"argparse.ArgumentParser",
"json.dumps",
"ftl.experiment.run_exp",
"numpy.arange"
] | [((250, 298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""driver.py"""'}), "(description='driver.py')\n", (273, 298), False, 'import argparse\n'), ((1574, 1605), 'numpy.arange', 'np.arange', (['(1)', '(args.n_repeat + 1)'], {}), '(1, args.n_repeat + 1)\n', (1583, 1605), True, 'import numpy as np\n'), ((1475, 1500), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1489, 1500), False, 'import os\n'), ((1510, 1532), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1521, 1532), False, 'import os\n'), ((1870, 1935), 'ftl.experiment.run_exp', 'run_exp', ([], {'client_config': 'client_config', 'server_config': 'server_config'}), '(client_config=client_config, server_config=server_config)\n', (1877, 1935), False, 'from ftl.experiment import run_exp\n'), ((2361, 2384), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (2372, 2384), False, 'import pickle\n'), ((1186, 1221), 'json.dumps', 'json.dumps', (['server_config'], {'indent': '(4)'}), '(server_config, indent=4)\n', (1196, 1221), False, 'import json\n'), ((1267, 1302), 'json.dumps', 'json.dumps', (['client_config'], {'indent': '(4)'}), '(client_config, indent=4)\n', (1277, 1302), False, 'import json\n')] |
# ===============================================================================
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from numpy import zeros, percentile, array, random, abs as nabs, column_stack
from scipy.stats import norm
# ============= local library imports ==========================
class MonteCarloEstimator(object):
    """Monte Carlo error estimator for a regression model.

    Repeatedly perturbs the regressor's observed y-values with Gaussian
    noise scaled by their reported errors, re-predicts at the requested
    points, and summarizes the spread of the resulting predictions.
    """
    def __init__(self, ntrials, regressor, seed=None):
        # ntrials: number of Monte Carlo resampling iterations.
        self.regressor = regressor
        self.ntrials = ntrials
        # seed for numpy's global RNG. NOTE(review): a seed of 0 is falsy
        # and would be silently ignored by the truthiness test in _get_dist.
        self.seed = seed
    def _calculate(self, nominal_ys, ps):
        """Return the half-width of the central ~68% band of the residuals.

        The 15.87th/84.13th percentiles bracket +/-1 sigma for a normal
        distribution; their absolute offsets are averaged per point.
        """
        res = nominal_ys - ps
        pct = (15.87, 84.13)
        # percentiles are computed column-wise: one (lo, hi) pair per point
        a, b = array([percentile(ri, pct) for ri in res.T]).T
        a, b = nabs(a), nabs(b)
        return (a + b) * 0.5
    def _get_dist(self, n, npts):
        """Return a frozen standard-normal dist, an (ntrials, n) noise
        sample, and a zeroed (ntrials, npts) prediction buffer."""
        if self.seed:
            random.seed(self.seed)
        ndist = norm()
        ntrials = self.ntrials
        ga = ndist.rvs((ntrials, n))
        ps = zeros((ntrials, npts))
        return ndist, ga, ps
    def _estimate(self, pts, pexog, ys=None, yserr=None):
        """Run the Monte Carlo loop and return (nominal_ys, errors).

        pexog is either a precomputed design matrix, or a callable taking
        the trial index (used when the design matrix itself is resampled
        each trial). ys/yserr default to the regressor's own arrays.
        """
        reg = self.regressor
        nominal_ys = reg.predict(pts)
        if ys is None:
            ys = reg.ys
        if yserr is None:
            yserr = reg.yserr
        n, npts = len(ys), len(pts)
        ntrials = self.ntrials
        ndist, ga, ps = self._get_dist(n, npts)
        pred = reg.fast_predict2
        # yp[i] is one perturbed realization of the observed y-values
        yp = ys + yserr * ga
        if hasattr(pexog, '__call__'):
            for i in range(ntrials):
                ps[i] = pred(yp[i], pexog(i))
        else:
            for i in range(ntrials):
                ps[i] = pred(yp[i], pexog)
        return nominal_ys, self._calculate(nominal_ys, ps)
class RegressionEstimator(MonteCarloEstimator):
    """Monte Carlo estimator that resamples the regressor's cleaned data."""

    def estimate(self, pts):
        """Return (nominal predictions, errors) for *pts* using the
        regressor's cleaned y/yerr arrays."""
        regressor = self.regressor
        return self._estimate(
            pts,
            regressor.get_exog(pts),
            ys=regressor.clean_ys,
            yserr=regressor.clean_yserr,
        )
class FluxEstimator(MonteCarloEstimator):
    """Monte Carlo estimator specialized for 2D (x, y) flux interpolation."""
    def estimate_position_err(self, pts, error):
        """Estimate prediction error caused by positional uncertainty.

        Each trial jitters the x/y coordinates of *pts* with Gaussian noise
        scaled by *error* and rebuilds the design matrix via the regressor.
        """
        reg = self.regressor
        ox, oy = pts.T
        n, npts = len(reg.ys), len(pts)
        ntrials = self.ntrials
        # NOTE(review): ga/ps returned here are unused — _estimate calls
        # _get_dist again internally; this call still seeds and advances
        # the RNG stream before the positional samples below.
        ndist, ga, ps = self._get_dist(n, npts)
        pgax = ndist.rvs((ntrials, npts))
        pgay = ndist.rvs((ntrials, npts))
        pgax *= error
        pgay *= error
        def get_pexog(i):
            # design matrix for the i-th jittered set of positions
            return reg.get_exog(column_stack((ox + pgax[i], oy + pgay[i])))
        return self._estimate(pts, get_pexog, yserr=0)
    def estimate(self, pts):
        """Return (nominal predictions, errors) for *pts* with no positional
        jitter (y-value resampling only; yserr taken from the regressor)."""
        reg = self.regressor
        pexog = reg.get_exog(pts)
        return self._estimate(pts, pexog)
# ============= EOF =============================================
| [
"numpy.abs",
"scipy.stats.norm",
"numpy.column_stack",
"numpy.zeros",
"numpy.random.seed",
"numpy.percentile"
] | [((1559, 1565), 'scipy.stats.norm', 'norm', ([], {}), '()\n', (1563, 1565), False, 'from scipy.stats import norm\n'), ((1648, 1670), 'numpy.zeros', 'zeros', (['(ntrials, npts)'], {}), '((ntrials, npts))\n', (1653, 1670), False, 'from numpy import zeros, percentile, array, random, abs as nabs, column_stack\n'), ((1404, 1411), 'numpy.abs', 'nabs', (['a'], {}), '(a)\n', (1408, 1411), True, 'from numpy import zeros, percentile, array, random, abs as nabs, column_stack\n'), ((1413, 1420), 'numpy.abs', 'nabs', (['b'], {}), '(b)\n', (1417, 1420), True, 'from numpy import zeros, percentile, array, random, abs as nabs, column_stack\n'), ((1519, 1541), 'numpy.random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (1530, 1541), False, 'from numpy import zeros, percentile, array, random, abs as nabs, column_stack\n'), ((3066, 3108), 'numpy.column_stack', 'column_stack', (['(ox + pgax[i], oy + pgay[i])'], {}), '((ox + pgax[i], oy + pgay[i]))\n', (3078, 3108), False, 'from numpy import zeros, percentile, array, random, abs as nabs, column_stack\n'), ((1349, 1368), 'numpy.percentile', 'percentile', (['ri', 'pct'], {}), '(ri, pct)\n', (1359, 1368), False, 'from numpy import zeros, percentile, array, random, abs as nabs, column_stack\n')] |
import numpy as np
import h5py
import pybel
import tfbio.net
import tfbio.data
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing
from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate
from keras.models import Model
from keras import backend as K
from keras.regularizers import l2
from .data import DataWrapper, get_box_size
# Public API of this module; also consulted by UNet.load_model to build the
# custom_objects mapping handed to keras when deserializing a saved model.
__all__ = [
    'dice',
    'dice_np',
    'dice_loss',
    'ovl',
    'ovl_np',
    'ovl_loss',
    'UNet',
]
def dice(y_true, y_pred, smoothing_factor=0.01):
    """Dice coefficient adapted for continuous data (predictions), computed
    with keras backend ops. Tensors are flattened first, so the score is a
    single global value.
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    total = K.sum(flat_true) + K.sum(flat_pred)
    return (2. * overlap + smoothing_factor) / (total + smoothing_factor)
def dice_np(y_true, y_pred, smoothing_factor=0.01):
    """Dice coefficient adapted for continuous data (predictions), computed
    with numpy arrays.
    """
    overlap = (y_true * y_pred).sum()
    numerator = 2. * overlap + smoothing_factor
    denominator = y_true.sum() + y_pred.sum() + smoothing_factor
    return numerator / denominator
def dice_loss(y_true, y_pred):
    """Keras loss function for Dice coefficient (loss(t, y) = -dice(t, y))"""
    score = dice(y_true, y_pred)
    return -score
def ovl(y_true, y_pred, smoothing_factor=0.01):
    """Overlap coefficient computed with keras backend ops."""
    stacked = K.concatenate((y_true, y_pred))
    numerator = K.sum(K.min(stacked, axis=-1)) + smoothing_factor
    denominator = K.sum(K.max(stacked, axis=-1)) + smoothing_factor
    return numerator / denominator
def ovl_np(y_true, y_pred, smoothing_factor=0.01):
    """Overlap coefficient computed with numpy arrays."""
    stacked = np.concatenate((y_true, y_pred), axis=-1)
    numerator = stacked.min(axis=-1).sum() + smoothing_factor
    denominator = stacked.max(axis=-1).sum() + smoothing_factor
    return numerator / denominator
def ovl_loss(y_true, y_pred):
    """Keras loss function for overlap coefficient (loss(t, y) = -ovl(t, y))"""
    value = ovl(y_true, y_pred)
    return -value
class UNet(Model):
    """3D Convolutional neural network based on U-Net
    see: "U-Net: Convolutional Networks for Biomedical Image Segmentation"
    https://arxiv.org/abs/1505.04597
    """
    # Spatial size (voxels per side) the default architecture is built
    # around; a custom box_size must be a positive multiple of this value.
    DEFAULT_SIZE = 36
    def __init__(self, inputs=None, outputs=None, data_handle=None,
                 featurizer=None, scale=None, box_size=None, input_channels=None,
                 output_channels=None, l2_lambda=1e-3, **kwargs):
        """Creates a new network. The model can be either initialized from
        `inputs` and `outputs` (keras layers), `data_handle` (DataWrapper
        object, from which all the shapes are inferred) or manually using
        `box_size`, `input_channels` and `output_channels` arguments. L2
        regularization is used (controlled by `l2_lambda` parameter) and all
        other arguments are passed to keras Model constructor.
        """
        # Every explicitly passed argument must agree with the matching
        # attribute of data_handle, when one is given.
        if data_handle is not None:
            if not isinstance(data_handle, DataWrapper):
                raise TypeError('data_handle should be a DataWrapper object,'
                                ' got %s instead' % type(data_handle))
            if box_size is None:
                box_size = data_handle.box_size
            elif box_size != data_handle.box_size:
                raise ValueError('specified box_size does not match '
                                 'data_handle.box_size (%s != %s)'
                                 % (box_size, data_handle.box_size))
            if input_channels is None:
                input_channels = data_handle.x_channels
            elif input_channels != data_handle.x_channels:
                raise ValueError('specified input_channels does not match '
                                 'data_handle.x_channels (%s != %s)'
                                 % (input_channels, data_handle.x_channels))
            if output_channels is None:
                output_channels = data_handle.y_channels
            elif output_channels != data_handle.y_channels:
                raise ValueError('specified output_channels does not match '
                                 'data_handle.y_channels (%s != %s)'
                                 % (output_channels, data_handle.y_channels))
            if scale is None:
                self.scale = data_handle.scale
            elif scale != data_handle.scale:
                raise ValueError('specified scale does not match '
                                 'data_handle.scale (%s != %s)'
                                 % (scale, data_handle.scale))
            self.max_dist = data_handle.max_dist
        else:
            self.scale = scale
            self.max_dist = None  # we'll calculate it later from box size
        # A featurizer fixes the number of input channels.
        if featurizer is not None:
            if not isinstance(featurizer, tfbio.data.Featurizer):
                raise TypeError('featurizer should be a tfbio.data.Featurizer '
                                'object, got %s instead' % type(featurizer))
            if input_channels is None:
                input_channels = len(featurizer.FEATURE_NAMES)
            elif input_channels != len(featurizer.FEATURE_NAMES):
                raise ValueError(
                    'specified input_channels or data_handle.x_channels does '
                    'not match number of features produce by featurizer '
                    '(%s != %s)' % (input_channels, len(featurizer.FEATURE_NAMES)))
        # Caller-provided layers: validate that their shapes are 5D
        # (batch, x, y, z, channels) and mutually consistent.
        if inputs is not None:
            if outputs is None:
                raise ValueError('you must provide both inputs and outputs')
            if isinstance(inputs, list):
                i_shape = UNet.__total_shape(inputs)
            else:
                i_shape = inputs.shape
            if isinstance(outputs, list):
                o_shape = UNet.__total_shape(outputs)
            else:
                o_shape = outputs.shape
            if len(i_shape) != 5:
                raise ValueError('input should be 5D, got %sD instead'
                                 % len(i_shape))
            elif len(o_shape) != 5:
                raise ValueError('output should be 5D, got %sD instead'
                                 % len(o_shape))
            elif i_shape[1:4] != o_shape[1:4]:
                raise ValueError('input and output shapes do not match '
                                 '(%s != %s)' % (i_shape[1:4], o_shape[1:4]))
            if box_size is None:
                box_size = i_shape[1]
            elif i_shape[1:4] != (box_size,) * 3:
                raise ValueError('input shape does not match box_size '
                                 '(%s != %s)' % (i_shape[1:4], (box_size,) * 3))
            if input_channels is not None and i_shape[4] != input_channels:
                raise ValueError('number of channels (specified via featurizer'
                                 ', input_channels or data_handle) does not '
                                 'match input shape (%s != %s)'
                                 % (i_shape[4], input_channels))
            if output_channels is not None and o_shape[4] != output_channels:
                raise ValueError('specified output_channels or '
                                 'data_handle.y_channels does not match '
                                 'output shape (%s != %s)'
                                 % (o_shape[4], output_channels))
        else:
            if outputs is not None:
                raise ValueError('you must provide both inputs and outputs')
            elif (box_size is None or input_channels is None
                  or output_channels is None):
                raise ValueError('you must either provide: 1) inputs and '
                                 'outputs (keras layers); 2) data_handle '
                                 '(DataWrapper object); 3) box_size, '
                                 'input_channels and output_channels')
            elif (box_size < self.DEFAULT_SIZE
                  or box_size % self.DEFAULT_SIZE != 0):
                raise ValueError('box_size does not match the default '
                                 'architecture. Pleas scecify inputs and outputs')
        # NOTE(review): the default architecture below is built
        # unconditionally, so when the caller supplies inputs/outputs they
        # are validated and then shadowed by the new layers — this looks
        # like it should live in the `else:` branch above; confirm against
        # the upstream source (possibly an indentation error).
        params = {'kernel_size': 3, 'activation': 'relu',
                  'padding': 'same', 'kernel_regularizer': l2(l2_lambda)}
        inputs = Input((box_size, box_size, box_size, input_channels), name='input')
        # Contracting path: pooling factors 2, 2, 3, 3 (total 36, hence
        # DEFAULT_SIZE) with channel width doubling at each level.
        conv1 = Convolution3D(filters=32, **params)(inputs)
        conv1 = Convolution3D(filters=32, **params)(conv1)
        pool1 = MaxPooling3D(pool_size=2)(conv1)
        conv2 = Convolution3D(filters=64, **params)(pool1)
        conv2 = Convolution3D(filters=64, **params)(conv2)
        pool2 = MaxPooling3D(pool_size=2)(conv2)
        conv3 = Convolution3D(filters=128, **params)(pool2)
        conv3 = Convolution3D(filters=128, **params)(conv3)
        pool3 = MaxPooling3D(pool_size=3)(conv3)
        conv4 = Convolution3D(filters=256, **params)(pool3)
        conv4 = Convolution3D(filters=256, **params)(conv4)
        pool4 = MaxPooling3D(pool_size=3)(conv4)
        conv5 = Convolution3D(filters=512, **params)(pool4)
        conv5 = Convolution3D(filters=512, **params)(conv5)
        # Expanding path: upsample and concatenate the matching
        # contracting-path feature map (skip connections) along channels.
        up6 = concatenate([UpSampling3D(size=3)(conv5), conv4], axis=4)
        conv6 = Convolution3D(filters=256, **params)(up6)
        conv6 = Convolution3D(filters=256, **params)(conv6)
        up7 = concatenate([UpSampling3D(size=3)(conv6), conv3], axis=4)
        conv7 = Convolution3D(filters=128, **params)(up7)
        conv7 = Convolution3D(filters=128, **params)(conv7)
        up8 = concatenate([UpSampling3D(size=2)(conv7), conv2], axis=4)
        conv8 = Convolution3D(filters=64, **params)(up8)
        conv8 = Convolution3D(filters=64, **params)(conv8)
        up9 = concatenate([UpSampling3D(size=2)(conv8), conv1], axis=4)
        conv9 = Convolution3D(filters=32, **params)(up9)
        conv9 = Convolution3D(filters=32, **params)(conv9)
        # 1x1x1 sigmoid convolution produces per-voxel pocket probabilities.
        outputs = Convolution3D(
            filters=output_channels,
            kernel_size=1,
            activation='sigmoid',
            kernel_regularizer=l2(l2_lambda),
            name='pocket'
        )(conv9)
        super().__init__(inputs=inputs, outputs=outputs, **kwargs)
        self.data_handle = data_handle
        self.featurizer = featurizer
        # Derive max_dist from the box geometry when it wasn't supplied.
        if self.max_dist is None and self.scale is not None:
            self.max_dist = (box_size - 1) / (2 * self.scale)
    @staticmethod
    def __total_shape(tensor_list):
        # Combined shape of a list of tensors: common leading dims with
        # the channel (last) dimension summed across the list.
        if len(tensor_list) == 1:
            total_shape = tuple(tensor_list[0].shape.as_list())
        else:
            total_shape = (*tensor_list[0].shape.as_list()[:-1],
                           sum(t.shape.as_list()[-1] for t in tensor_list))
        return total_shape
    def save_keras(self, path):
        """Save the model in keras HDF format.

        The class name is temporarily swapped to 'Model' so the file can be
        loaded without this custom class being importable.
        """
        class_name = self.__class__.__name__
        self.__class__.__name__ = 'Model'
        self.save(path, include_optimizer=False)
        self.__class__.__name__ = class_name
    @staticmethod
    def load_model(path, **attrs):
        """Load model saved in HDF format.

        Extra keyword attributes (data_handle, featurizer, scale, max_dist,
        box_size, ...) are validated for mutual consistency and attached to
        the loaded model.
        """
        from keras.models import load_model as keras_load
        # Custom losses/metrics exported by this module must be registered
        # for keras to deserialize the model.
        custom_objects = {name: val for name, val in globals().items()
                          if name in __all__}
        model = keras_load(path, custom_objects=custom_objects)
        if 'data_handle' in attrs:
            if not isinstance(attrs['data_handle'], DataWrapper):
                raise TypeError('data_handle should be a DataWrapper object, '
                                'got %s instead' % type(attrs['data_handle']))
            elif 'scale' not in attrs:
                attrs['scale'] = attrs['data_handle'].scale
            elif attrs['scale'] != attrs['data_handle'].scale:
                raise ValueError('specified scale does not match '
                                 'data_handle.scale (%s != %s)'
                                 % (attrs['scale'], attrs['data_handle'].scale))
            if 'featurizer' in attrs:
                if not (isinstance(attrs['featurizer'], tfbio.data.Featurizer)):
                    raise TypeError(
                        'featurizer should be a tfbio.data.Featurizer object, '
                        'got %s instead' % type(attrs['featurizer']))
                elif (len(attrs['featurizer'].FEATURE_NAMES)
                      != attrs['data_handle'].x_channels):
                    raise ValueError(
                        'number of features produced be the featurizer does '
                        'not match data_handle.x_channels (%s != %s)'
                        % (len(attrs['featurizer'].FEATURE_NAMES),
                           attrs['data_handle'].x_channels))
            if 'max_dist' not in attrs:
                attrs['max_dist'] = attrs['data_handle'].max_dist
            elif attrs['max_dist'] != attrs['data_handle'].max_dist:
                raise ValueError('specified max_dist does not match '
                                 'data_handle.max_dist (%s != %s)'
                                 % (attrs['max_dist'],
                                    attrs['data_handle'].max_dist))
            if 'box_size' not in attrs:
                attrs['box_size'] = attrs['data_handle'].box_size
            elif attrs['box_size'] != attrs['data_handle'].box_size:
                raise ValueError('specified box_size does not match '
                                 'data_handle.box_size (%s != %s)'
                                 % (attrs['box_size'],
                                    attrs['data_handle'].box_size))
        elif 'featurizer' in attrs and not (isinstance(attrs['featurizer'],
                                                       tfbio.data.Featurizer)):
            raise TypeError(
                'featurizer should be a tfbio.data.Featurizer object, '
                'got %s instead' % type(attrs['featurizer']))
        # scale + max_dist determine box_size; cross-check if all given.
        if 'scale' in attrs and 'max_dist' in attrs:
            box_size = get_box_size(attrs['scale'], attrs['max_dist'])
            if 'box_size' in attrs:
                if not attrs['box_size'] == box_size:
                    raise ValueError('specified box_size does not match '
                                     'size defined by scale and max_dist (%s != %s)'
                                     % (attrs['box_size'], box_size))
            else:
                attrs['box_size'] = box_size
        # TODO: add some attrs validation if handle is not specified
        for attr, value in attrs.items():
            setattr(model, attr, value)
        return model
    def pocket_density_from_mol(self, mol):
        """Predict probability density of pockets using pybel.Molecule object
        as input. Returns (density, origin, step) in the molecule's
        coordinate frame."""
        if not isinstance(mol, pybel.Molecule):
            raise TypeError('mol should be a pybel.Molecule object, got %s '
                            'instead' % type(mol))
        if self.featurizer is None:
            raise ValueError('featurizer must be set to make predistions for '
                             'molecules')
        if self.scale is None:
            raise ValueError('scale must be set to make predistions')
        prot_coords, prot_features = self.featurizer.get_features(mol)
        # Center coordinates on the molecule's centroid before gridding.
        centroid = prot_coords.mean(axis=0)
        prot_coords -= centroid
        resolution = 1. / self.scale
        x = tfbio.data.make_grid(prot_coords, prot_features,
                                 max_dist=self.max_dist,
                                 grid_resolution=resolution)
        density = self.predict(x)
        # origin/step map grid indices back to original coordinates.
        origin = (centroid - self.max_dist)
        step = np.array([1.0 / self.scale] * 3)
        return density, origin, step
    def pocket_density_from_grid(self, pdbid):
        """Predict probability density of pockets using 3D grid (np.ndarray)
        as input. Returns (density, origin, step)."""
        if self.data_handle is None:
            raise ValueError('data_handle must be set to make predictions '
                             'using PDBIDs')
        if self.scale is None:
            raise ValueError('scale must be set to make predistions')
        x, _ = self.data_handle.prepare_complex(pdbid)
        origin = (self.data_handle[pdbid]['centroid'][:] - self.max_dist)
        step = np.array([1.0 / self.scale] * 3)
        density = self.predict(x)
        return density, origin, step
    def save_density_as_cmap(self, density, origin, step, fname='pockets.cmap',
                             mode='w', name='protein'):
        """Save predicted pocket density as .cmap file (which can be opened in
        UCSF Chimera or ChimeraX)
        """
        if len(density) != 1:
            raise ValueError('saving more than one prediction at a time is not'
                             ' supported')
        # Chimera expects z, y, x axis order; channels become separate images.
        density = density[0].transpose([3, 2, 1, 0])
        with h5py.File(fname, mode) as cmap:
            g1 = cmap.create_group('Chimera')
            for i, channel_dens in enumerate(density):
                g2 = g1.create_group('image%s' % (i + 1))
                g2.attrs['chimera_map_version'] = 1
                g2.attrs['name'] = name.encode() + b' binding sites'
                g2.attrs['origin'] = origin
                g2.attrs['step'] = step
                g2.create_dataset('data_zyx', data=channel_dens,
                                  shape=channel_dens.shape,
                                  dtype='float32')
    def save_density_as_cube(self, density, origin, step, fname='pockets.cube',
                             mode='w', name='protein'):
        """Save predicted pocket density as .cube file (format originating from
        Gaussian package).
        """
        # Cube files use Bohr radii; inputs here are in Angstroms.
        angstrom2bohr = 1.889725989
        if len(density) != 1:
            raise ValueError('saving more than one prediction at a time is not'
                             ' supported')
        if density.shape[-1] != 1:
            raise NotImplementedError('saving multichannel density is not'
                                      ' supported yet, please save each'
                                      ' channel in a separate file.')
        with open(fname, 'w') as f:
            f.write('%s CUBE FILE.\n' % name)
            f.write('OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z\n')
            f.write(' 1 %12.6f %12.6f %12.6f\n' % tuple(angstrom2bohr * origin))
            f.write(
                '%5i %12.6f 0.000000 0.000000\n'
                '%5i 0.000000 %12.6f 0.000000\n'
                '%5i 0.000000 0.000000 %12.6f\n'
                % tuple(i for pair in zip(density.shape[1:4],
                                          angstrom2bohr * step) for i in pair)
            )
            f.write(' 1 0.000000 %12.6f %12.6f %12.6f\n'
                    % tuple(angstrom2bohr * origin))
            # NOTE(review): reshape((-1, 6)) requires the voxel count to be
            # divisible by 6 — confirm for all supported box sizes.
            f.write('\n'.join([' '.join('%12.6f' % i for i in row)
                               for row in density.reshape((-1, 6))]))
    def get_pockets_segmentation(self, density, threshold=0.5, min_size=50):
        """Predict pockets using specified threshold on the probability density.
        Filter out pockets smaller than min_size A^3
        """
        if len(density) != 1:
            raise ValueError('segmentation of more than one pocket is not'
                             ' supported')
        voxel_size = (1 / self.scale) ** 3
        # get a general shape, without distinguishing output channels
        bw = closing((density[0] > threshold).any(axis=-1))
        # remove artifacts connected to border
        cleared = clear_border(bw)
        # label regions
        label_image, num_labels = label(cleared, return_num=True)
        # Zero out labels whose physical volume is below min_size.
        for i in range(1, num_labels + 1):
            pocket_idx = (label_image == i)
            pocket_size = pocket_idx.sum() * voxel_size
            if pocket_size < min_size:
                label_image[np.where(pocket_idx)] = 0
        return label_image
    def predict_pocket_atoms(self, mol, dist_cutoff=4.5, expand_residue=True,
                             **pocket_kwargs):
        """Predict pockets for a given molecule and get AAs forming them
        (list pybel.Molecule objects).
        Parameters
        ----------
        mol: pybel.Molecule object
            Protein structure
        dist_cutoff: float, optional (default=4.5)
            Maximal distance between protein atom and predicted pocket
        expand_residue: bool, optional (default=True)
            Include whole residue if at least one atom is included in the pocket
        pocket_kwargs:
            Keyword argument passed to `get_pockets_segmentation` method
        Returns
        -------
        pocket_mols: list of pybel.Molecule objects
            Fragments of molecule corresponding to detected pockets.
        """
        from scipy.spatial.distance import cdist
        coords = np.array([a.coords for a in mol.atoms])
        atom2residue = np.array([a.residue.idx for a in mol.atoms])
        residue2atom = np.array([[a.idx - 1 for a in r.atoms]
                                 for r in mol.residues])
        # predict pockets
        density, origin, step = self.pocket_density_from_mol(mol)
        pockets = self.get_pockets_segmentation(density, **pocket_kwargs)
        # find atoms close to pockets
        pocket_atoms = []
        for pocket_label in range(1, pockets.max() + 1):
            # convert voxel indices of this pocket to Cartesian coordinates
            indices = np.argwhere(pockets == pocket_label).astype('float32')
            indices *= step
            indices += origin
            distance = cdist(coords, indices)
            close_atoms = np.where((distance < dist_cutoff).any(axis=1))[0]
            if len(close_atoms) == 0:
                continue
            if expand_residue:
                residue_ids = np.unique(atom2residue[close_atoms])
                close_atoms = np.concatenate(residue2atom[residue_ids])
            pocket_atoms.append([int(idx) for idx in close_atoms])
        # create molecules corresponding to atom indices
        pocket_mols = []
        # TODO optimize (copy atoms to new molecule instead of deleting?)
        for pocket in pocket_atoms:
            # copy molecule
            pocket_mol = mol.clone
            atoms_to_del = (set(range(len(pocket_mol.atoms)))
                            - set(pocket))
            pocket_mol.OBMol.BeginModify()
            # delete from the highest index down so indices stay valid
            for aidx in sorted(atoms_to_del, reverse=True):
                atom = pocket_mol.OBMol.GetAtom(aidx + 1)
                pocket_mol.OBMol.DeleteAtom(atom)
            pocket_mol.OBMol.EndModify()
            pocket_mols.append(pocket_mol)
        return pocket_mols
| [
"keras.backend.sum",
"keras.backend.flatten",
"numpy.array",
"keras.layers.UpSampling3D",
"numpy.where",
"keras.backend.max",
"numpy.concatenate",
"skimage.measure.label",
"keras.backend.concatenate",
"skimage.segmentation.clear_border",
"h5py.File",
"keras.regularizers.l2",
"keras.layers.Co... | [((715, 732), 'keras.backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (724, 732), True, 'from keras import backend as K\n'), ((748, 765), 'keras.backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (757, 765), True, 'from keras import backend as K\n'), ((785, 811), 'keras.backend.sum', 'K.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (790, 811), True, 'from keras import backend as K\n'), ((1535, 1566), 'keras.backend.concatenate', 'K.concatenate', (['(y_true, y_pred)'], {}), '((y_true, y_pred))\n', (1548, 1566), True, 'from keras import backend as K\n'), ((1819, 1860), 'numpy.concatenate', 'np.concatenate', (['(y_true, y_pred)'], {'axis': '(-1)'}), '((y_true, y_pred), axis=-1)\n', (1833, 1860), True, 'import numpy as np\n'), ((11508, 11555), 'keras.models.load_model', 'keras_load', (['path'], {'custom_objects': 'custom_objects'}), '(path, custom_objects=custom_objects)\n', (11518, 11555), True, 'from keras.models import load_model as keras_load\n'), ((15828, 15860), 'numpy.array', 'np.array', (['([1.0 / self.scale] * 3)'], {}), '([1.0 / self.scale] * 3)\n', (15836, 15860), True, 'import numpy as np\n'), ((16448, 16480), 'numpy.array', 'np.array', (['([1.0 / self.scale] * 3)'], {}), '([1.0 / self.scale] * 3)\n', (16456, 16480), True, 'import numpy as np\n'), ((19780, 19796), 'skimage.segmentation.clear_border', 'clear_border', (['bw'], {}), '(bw)\n', (19792, 19796), False, 'from skimage.segmentation import clear_border\n'), ((19856, 19887), 'skimage.measure.label', 'label', (['cleared'], {'return_num': '(True)'}), '(cleared, return_num=True)\n', (19861, 19887), False, 'from skimage.measure import label\n'), ((21079, 21118), 'numpy.array', 'np.array', (['[a.coords for a in mol.atoms]'], {}), '([a.coords for a in mol.atoms])\n', (21087, 21118), True, 'import numpy as np\n'), ((21142, 21186), 'numpy.array', 'np.array', (['[a.residue.idx for a in mol.atoms]'], {}), '([a.residue.idx for a in 
mol.atoms])\n', (21150, 21186), True, 'import numpy as np\n'), ((21210, 21274), 'numpy.array', 'np.array', (['[[(a.idx - 1) for a in r.atoms] for r in mol.residues]'], {}), '([[(a.idx - 1) for a in r.atoms] for r in mol.residues])\n', (21218, 21274), True, 'import numpy as np\n'), ((8424, 8491), 'keras.layers.Input', 'Input', (['(box_size, box_size, box_size, input_channels)'], {'name': '"""input"""'}), "((box_size, box_size, box_size, input_channels), name='input')\n", (8429, 8491), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((17034, 17056), 'h5py.File', 'h5py.File', (['fname', 'mode'], {}), '(fname, mode)\n', (17043, 17056), False, 'import h5py\n'), ((21753, 21775), 'scipy.spatial.distance.cdist', 'cdist', (['coords', 'indices'], {}), '(coords, indices)\n', (21758, 21775), False, 'from scipy.spatial.distance import cdist\n'), ((878, 893), 'keras.backend.sum', 'K.sum', (['y_true_f'], {}), '(y_true_f)\n', (883, 893), True, 'from keras import backend as K\n'), ((896, 911), 'keras.backend.sum', 'K.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (901, 911), True, 'from keras import backend as K\n'), ((1586, 1608), 'keras.backend.min', 'K.min', (['concat'], {'axis': '(-1)'}), '(concat, axis=-1)\n', (1591, 1608), True, 'from keras import backend as K\n'), ((1651, 1673), 'keras.backend.max', 'K.max', (['concat'], {'axis': '(-1)'}), '(concat, axis=-1)\n', (1656, 1673), True, 'from keras import backend as K\n'), ((8388, 8401), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (8390, 8401), False, 'from keras.regularizers import l2\n'), ((8512, 8547), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(32)'}), '(filters=32, **params)\n', (8525, 8547), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8576, 8611), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(32)'}), '(filters=32, **params)\n', (8589, 8611), False, 'from 
keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8639, 8664), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (8651, 8664), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8693, 8728), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(64)'}), '(filters=64, **params)\n', (8706, 8728), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8756, 8791), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(64)'}), '(filters=64, **params)\n', (8769, 8791), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8819, 8844), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (8831, 8844), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8873, 8909), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(128)'}), '(filters=128, **params)\n', (8886, 8909), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((8937, 8973), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(128)'}), '(filters=128, **params)\n', (8950, 8973), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9001, 9026), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(3)'}), '(pool_size=3)\n', (9013, 9026), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9055, 9091), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(256)'}), '(filters=256, **params)\n', (9068, 9091), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9119, 9155), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': 
'(256)'}), '(filters=256, **params)\n', (9132, 9155), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9183, 9208), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(3)'}), '(pool_size=3)\n', (9195, 9208), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9237, 9273), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(512)'}), '(filters=512, **params)\n', (9250, 9273), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9301, 9337), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(512)'}), '(filters=512, **params)\n', (9314, 9337), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9442, 9478), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(256)'}), '(filters=256, **params)\n', (9455, 9478), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9504, 9540), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(256)'}), '(filters=256, **params)\n', (9517, 9540), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9645, 9681), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(128)'}), '(filters=128, **params)\n', (9658, 9681), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9707, 9743), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(128)'}), '(filters=128, **params)\n', (9720, 9743), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9848, 9883), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(64)'}), '(filters=64, **params)\n', (9861, 9883), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, 
concatenate\n'), ((9909, 9944), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(64)'}), '(filters=64, **params)\n', (9922, 9944), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((10049, 10084), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(32)'}), '(filters=32, **params)\n', (10062, 10084), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((10110, 10145), 'keras.layers.Convolution3D', 'Convolution3D', ([], {'filters': '(32)'}), '(filters=32, **params)\n', (10123, 10145), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((21976, 22012), 'numpy.unique', 'np.unique', (['atom2residue[close_atoms]'], {}), '(atom2residue[close_atoms])\n', (21985, 22012), True, 'import numpy as np\n'), ((22043, 22084), 'numpy.concatenate', 'np.concatenate', (['residue2atom[residue_ids]'], {}), '(residue2atom[residue_ids])\n', (22057, 22084), True, 'import numpy as np\n'), ((20098, 20118), 'numpy.where', 'np.where', (['pocket_idx'], {}), '(pocket_idx)\n', (20106, 20118), True, 'import numpy as np\n'), ((21617, 21653), 'numpy.argwhere', 'np.argwhere', (['(pockets == pocket_label)'], {}), '(pockets == pocket_label)\n', (21628, 21653), True, 'import numpy as np\n'), ((9377, 9397), 'keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(3)'}), '(size=3)\n', (9389, 9397), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9580, 9600), 'keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(3)'}), '(size=3)\n', (9592, 9600), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9783, 9803), 'keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)'}), '(size=2)\n', (9795, 9803), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((9984, 10004), 
'keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)'}), '(size=2)\n', (9996, 10004), False, 'from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, concatenate\n'), ((10336, 10349), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (10338, 10349), False, 'from keras.regularizers import l2\n')] |
"""The implementation of classifier wrappers
"""
# Author: <NAME> <<EMAIL>>
from sklearn.model_selection import KFold
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import numpy as np
from model_wrapper import ModelWrapper
from pred_results import BinaryPredResults
class KfoldBinaryClassifierWrapper(ModelWrapper):
    """Run k-fold cross-validation with a sklearn binary classifier model.

    Attributes
    ----------
    k : int
        The number of folds in cross-validation.
    _kfold : sklearn.model_selection.KFold
        K-Folds cross-validator built for the given k.
    """
    def __init__(self, data_frame, label_name, feature_names,\
        categorical_feature_names, k=5):
        # Base class stores the frame and column metadata; presumably it also
        # provides self.model used in run() -- confirm against ModelWrapper.
        ModelWrapper.__init__(self, data_frame, \
            label_name, feature_names, categorical_feature_names)
        self.k = k
        self._kfold = None
        self._generate_kfold()
    def _generate_kfold(self):
        """Generate a K-Folds cross-validator for the configured k.

        Returns
        -------
        None
        """
        # shuffle=True randomizes the fold assignment on every run
        self._kfold = KFold(n_splits=self.k, shuffle=True)
    def _split_data(self, train_idx, test_idx):
        """Split the data_frame by training index and testing index.

        Parameters
        ----------
        train_idx : list
            A list of training (row position) indexes.
        test_idx : list
            A list of testing (row position) indexes.

        Returns
        -------
        DataFrames
            Feature/label frames (x_train, y_train, x_test, y_test) used in
            the current fold of cross-validation.
        """
        x_train = (self.data_frame[self.feature_names].iloc[train_idx, :])
        y_train = self.data_frame[self.label_name].iloc[train_idx]
        x_test = (self.data_frame[self.feature_names].iloc[test_idx, :])
        y_test = self.data_frame[self.label_name].iloc[test_idx]
        return x_train, y_train, x_test, y_test
    def _transform_categorical_featurs(self):
        """Use the sklearn LabelEncoder to integer-encode categorical features.

        This is necessary for using categorical values that are represented
        as strings.  The encoding mutates self.data_frame in place.

        Returns
        -------
        None
        """
        encoder = LabelEncoder()
        # Transform each categorical feature to values between 0 and n_classes-1
        for name in self.categorical_feature_names:
            self.data_frame[name] = encoder.fit_transform(self.data_frame[name])
    def _onehot_categorical_featurs(self, train_data, test_data):
        """Use the sklearn OneHotEncoder to one-hot encode categorical features.

        In order to feed categorical features to a sklearn model, we need to
        convert them to one-hot encoding.  There are other ways of handling
        categorical features, such as pandas.get_dummies, but get_dummies
        creates additional columns in data_frame, which we prefer to avoid.

        Parameters
        ----------
        train_data : DataFrame
            Training data.
        test_data : DataFrame
            Testing data.

        Returns
        -------
        numpy matrix
            Two (sparse) matrices with one-hot encoded features.
        """
        if self.categorical_feature_names == []:
            return train_data, test_data
        # Select indexes of categorical features, then encode and transform them
        feature_idxs = [self.data_frame.columns.get_loc(name) for name in \
            self.categorical_feature_names]
        # NOTE(review): OneHotEncoder's categorical_features parameter was
        # removed in scikit-learn 0.24 -- this code assumes an older sklearn.
        encoder = OneHotEncoder(categorical_features=feature_idxs)
        # Fit on values from both splits so every category level is known
        encoder.fit(np.vstack((train_data, test_data)))
        train_data = encoder.transform(train_data)
        test_data = encoder.transform(test_data)
        return train_data, test_data
    def run(self):
        """Run the classifier model with k-fold cross-validation.

        Returns
        -------
        BinaryPredResults
            Model-generated prediction results, one row per input sample.
        """
        results = BinaryPredResults(len(self.data_frame))
        self._transform_categorical_featurs()
        # Run k-fold cross-validation
        for train_idx, test_idx in self._kfold.split(self.data_frame):
            x_train, y_train, x_test, y_test = self._split_data(train_idx, test_idx)
            x_train, x_test = self._onehot_categorical_featurs(x_train, x_test)
            # Training
            self.model.fit(x_train, y_train)
            # Testing and generating predictions; column 1 of predict_proba
            # is the probability of the positive class
            y_pred_p = self.model.predict_proba(x_test)[:, 1]
            y_pred_l = self.model.predict(x_test)
            results.set_col(y_test, 'label', test_idx)
            results.set_col(y_pred_p, 'pred_prob', test_idx)
            results.set_col(y_pred_l, 'pred_label', test_idx)
        return results
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.OneHotEncoder",
"numpy.vstack",
"sklearn.model_selection.KFold",
"model_wrapper.ModelWrapper.__init__"
] | [((697, 794), 'model_wrapper.ModelWrapper.__init__', 'ModelWrapper.__init__', (['self', 'data_frame', 'label_name', 'feature_names', 'categorical_feature_names'], {}), '(self, data_frame, label_name, feature_names,\n categorical_feature_names)\n', (718, 794), False, 'from model_wrapper import ModelWrapper\n'), ((1056, 1092), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.k', 'shuffle': '(True)'}), '(n_splits=self.k, shuffle=True)\n', (1061, 1092), False, 'from sklearn.model_selection import KFold\n'), ((2124, 2138), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2136, 2138), False, 'from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n'), ((3410, 3458), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categorical_features': 'feature_idxs'}), '(categorical_features=feature_idxs)\n', (3423, 3458), False, 'from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n'), ((3548, 3582), 'numpy.vstack', 'np.vstack', (['(train_data, test_data)'], {}), '((train_data, test_data))\n', (3557, 3582), True, 'import numpy as np\n')] |
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.core.indexes.timedeltas import timedelta_range
def test_asfreq_bug():
    # Regression check: asfreq over a TimedeltaIndex must insert NaN rows
    # for the intermediate periods that have no observations.
    idx = [timedelta(), timedelta(minutes=3)]
    frame = DataFrame(data=[1, 3], index=idx)
    result = frame.resample("1T").asfreq()
    full_index = timedelta_range("0 day", periods=4, freq="1T")
    expected = DataFrame(data=[1, np.nan, np.nan, 3], index=full_index)
    tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
    # GH 13223: NaT entries in a TimedeltaIndex are dropped by resample,
    # leaving a NaN bucket in the regular output grid.
    source_index = pd.to_timedelta(["0s", pd.NaT, "2s"])
    result = DataFrame({"value": [2, 3, 5]}, source_index).resample("1s").mean()
    expected_index = timedelta_range("0 day", periods=3, freq="1S")
    expected = DataFrame({"value": [2.5, np.nan, 5.0]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
def test_resample_as_freq_with_subperiod():
    # GH 13022: asfreq to a frequency that subdivides the original spacing
    # should introduce NaN rows at the new intermediate timestamps.
    source_index = timedelta_range("00:00:00", "00:10:00", freq="5T")
    frame = DataFrame(data={"value": [1, 5, 10]}, index=source_index)
    result = frame.resample("2T").asfreq()
    target_index = timedelta_range("00:00:00", "00:10:00", freq="2T")
    expected = DataFrame(
        data={"value": [1, np.nan, np.nan, np.nan, np.nan, 10]},
        index=target_index,
    )
    tm.assert_frame_equal(result, expected)
def test_resample_with_timedeltas():
    # Summing over 30-minute bins must agree with grouping by index // 30.
    values = np.arange(1480)
    expected = DataFrame({"A": values})
    expected = expected.groupby(expected.index // 30).sum()
    expected.index = timedelta_range("0 days", freq="30T", periods=50)

    frame = DataFrame({"A": values}, index=pd.to_timedelta(values, unit="T"))
    result = frame.resample("30T").sum()
    tm.assert_frame_equal(result, expected)

    # The same resample on the Series yields the matching column.
    series_result = frame["A"].resample("30T").sum()
    tm.assert_series_equal(series_result, expected["A"])
def test_resample_single_period_timedelta():
    # Five one-second points folded into 2-second bins: sums are 1, 5, 4.
    idx_in = timedelta_range("1 day", freq="s", periods=5)
    result = Series(range(5), index=idx_in).resample("2s").sum()
    idx_out = timedelta_range("1 day", freq="2s", periods=3)
    tm.assert_series_equal(result, Series([1, 5, 4], index=idx_out))
def test_resample_timedelta_idempotency():
    # GH 12072: resampling at the index's own frequency is a no-op apart
    # from the float upcast performed by mean().
    idx = timedelta_range("0", periods=9, freq="10L")
    data = Series(range(9), index=idx)
    result = data.resample("10L").mean()
    tm.assert_series_equal(result, data.astype(float))
def test_resample_offset_with_timedeltaindex():
    # GH 10530 & 31809: the `offset` argument shifts every bin edge.
    rng = timedelta_range(start="0s", periods=25, freq="s")
    ts = Series(np.random.randn(len(rng)), index=rng)

    resampled_offset = ts.resample("2s", offset="5s").mean()
    resampled_plain = ts.resample("2s").mean()

    tm.assert_index_equal(
        resampled_plain.index, timedelta_range(start="0s", end="25s", freq="2s")
    )
    tm.assert_index_equal(
        resampled_offset.index, timedelta_range(start="5s", end="29s", freq="2s")
    )
def test_resample_categorical_data_with_timedeltaindex():
    # GH #12169: aggregating a categorical column keeps the modal value.
    idx = pd.to_timedelta(list(range(20)), unit="s")
    frame = DataFrame({"Group_obj": "A"}, index=idx)
    frame["Group"] = frame["Group_obj"].astype("category")

    def most_common(bucket):
        return bucket.value_counts().index[0]

    result = frame.resample("10s").agg(most_common)

    expected_index = pd.TimedeltaIndex([0, 10], unit="s", freq="10s")
    expected = DataFrame(
        {"Group_obj": ["A", "A"], "Group": ["A", "A"]}, index=expected_index
    )
    expected = expected.reindex(["Group_obj", "Group"], axis=1)
    expected["Group"] = expected["Group_obj"]
    tm.assert_frame_equal(result, expected)
def test_resample_timedelta_values():
    # GH 13119: NaT values introduced by the resampling must keep the
    # timedelta64 dtype of the column.
    times = timedelta_range("1 day", "6 day", freq="4D")
    frame = DataFrame({"time": times}, index=times)

    times2 = timedelta_range("1 day", "6 day", freq="2D")
    exp = Series(times2, index=times2, name="time")
    exp.iloc[1] = pd.NaT

    tm.assert_series_equal(frame.resample("2D").first()["time"], exp)
    tm.assert_series_equal(frame["time"].resample("2D").first(), exp)
@pytest.mark.parametrize(
    "start, end, freq, resample_freq",
    [
        ("8H", "21h59min50s", "10S", "3H"),  # GH 30353 example
        ("3H", "22H", "1H", "5H"),
        ("527D", "5006D", "3D", "10D"),
        ("1D", "10D", "1D", "2D"),  # GH 13022 example
        # tests that worked before GH 33498:
        ("8H", "21h59min50s", "10S", "2H"),
        ("0H", "21h59min50s", "10S", "3H"),
        ("10D", "85D", "D", "2D"),
    ],
)
def test_resample_timedelta_edge_case(start, end, freq, resample_freq):
    # GH 33498: the resampled index must not carry a spurious extra bin.
    source_index = timedelta_range(start=start, end=end, freq=freq)
    series = Series(np.arange(len(source_index)), index=source_index)
    result = series.resample(resample_freq).min()

    expected_index = timedelta_range(freq=resample_freq, start=start, end=end)
    tm.assert_index_equal(result.index, expected_index)
    assert result.index.freq == expected_index.freq
    # The final bin must be populated, not an empty artifact.
    assert not np.isnan(result[-1])
@pytest.mark.parametrize("duplicates", [True, False])
def test_resample_with_timedelta_yields_no_empty_groups(duplicates):
    # GH 10603: slicing off the first second must not leave empty buckets.
    frame = DataFrame(
        np.random.normal(size=(10000, 4)),
        index=timedelta_range(start="0s", periods=10000, freq="3906250n"),
    )
    if duplicates:
        # also exercise the non-unique-columns code path
        frame.columns = ["A", "B", "A", "C"]

    result = frame.loc["1s":, :].resample("3s").apply(lambda x: len(x))

    expected = DataFrame(
        [[768] * 4] * 12 + [[528] * 4],
        index=timedelta_range(start="1s", periods=13, freq="3s"),
    )
    expected.columns = frame.columns
    tm.assert_frame_equal(result, expected)
def test_resample_quantile_timedelta():
    # GH 29485: quantile on a timedelta column keeps timedelta values.
    frame = DataFrame(
        {"value": pd.to_timedelta(np.arange(4), unit="s")},
        index=pd.date_range("20200101", periods=4, tz="UTC"),
    )
    result = frame.resample("2D").quantile(0.99)

    expected_values = [
        pd.Timedelta("0 days 00:00:00.990000"),
        pd.Timedelta("0 days 00:00:02.990000"),
    ]
    expected = DataFrame(
        {"value": expected_values},
        index=pd.date_range("20200101", periods=2, tz="UTC", freq="2D"),
    )
    tm.assert_frame_equal(result, expected)
| [
"pandas.Series",
"numpy.random.normal",
"pandas.to_timedelta",
"pandas._testing.assert_series_equal",
"pandas.core.indexes.timedeltas.timedelta_range",
"pandas.Timedelta",
"datetime.timedelta",
"pandas._testing.assert_index_equal",
"pytest.mark.parametrize",
"numpy.isnan",
"pandas.date_range",
... | [((4289, 4586), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start, end, freq, resample_freq"""', "[('8H', '21h59min50s', '10S', '3H'), ('3H', '22H', '1H', '5H'), ('527D',\n '5006D', '3D', '10D'), ('1D', '10D', '1D', '2D'), ('8H', '21h59min50s',\n '10S', '2H'), ('0H', '21h59min50s', '10S', '3H'), ('10D', '85D', 'D', '2D')\n ]"], {}), "('start, end, freq, resample_freq', [('8H',\n '21h59min50s', '10S', '3H'), ('3H', '22H', '1H', '5H'), ('527D',\n '5006D', '3D', '10D'), ('1D', '10D', '1D', '2D'), ('8H', '21h59min50s',\n '10S', '2H'), ('0H', '21h59min50s', '10S', '3H'), ('10D', '85D', 'D',\n '2D')])\n", (4312, 4586), False, 'import pytest\n'), ((5286, 5338), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""duplicates"""', '[True, False]'], {}), "('duplicates', [True, False])\n", (5309, 5338), False, 'import pytest\n'), ((520, 559), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (541, 559), True, 'import pandas._testing as tm\n'), ((624, 661), 'pandas.to_timedelta', 'pd.to_timedelta', (["['0s', pd.NaT, '2s']"], {}), "(['0s', pd.NaT, '2s'])\n", (639, 661), True, 'import pandas as pd\n'), ((879, 918), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (900, 918), True, 'import pandas._testing as tm\n'), ((997, 1047), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""00:00:00"""', '"""00:10:00"""'], {'freq': '"""5T"""'}), "('00:00:00', '00:10:00', freq='5T')\n", (1012, 1047), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((1058, 1108), 'pandas.DataFrame', 'DataFrame', ([], {'data': "{'value': [1, 5, 10]}", 'index': 'index'}), "(data={'value': [1, 5, 10]}, index=index)\n", (1067, 1108), False, 'from pandas import DataFrame, Series\n'), ((1347, 1386), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, 
expected)\n', (1368, 1386), True, 'import pandas._testing as tm\n'), ((1564, 1613), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""0 days"""'], {'freq': '"""30T"""', 'periods': '(50)'}), "('0 days', freq='30T', periods=50)\n", (1579, 1613), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((1772, 1811), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (1793, 1811), True, 'import pandas._testing as tm\n'), ((1874, 1919), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', "expected['A']"], {}), "(result, expected['A'])\n", (1896, 1919), True, 'import pandas._testing as tm\n'), ((2187, 2227), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (2209, 2227), True, 'import pandas._testing as tm\n'), ((2307, 2350), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""0"""'], {'periods': '(9)', 'freq': '"""10L"""'}), "('0', periods=9, freq='10L')\n", (2322, 2350), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((2481, 2521), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (2503, 2521), True, 'import pandas._testing as tm\n'), ((2610, 2659), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', ([], {'start': '"""0s"""', 'periods': '(25)', 'freq': '"""s"""'}), "(start='0s', periods=25, freq='s')\n", (2625, 2659), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((2843, 2892), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', ([], {'start': '"""0s"""', 'end': '"""25s"""', 'freq': '"""2s"""'}), "(start='0s', end='25s', freq='2s')\n", (2858, 2892), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((2914, 2963), 'pandas.core.indexes.timedeltas.timedelta_range', 
'timedelta_range', ([], {'start': '"""5s"""', 'end': '"""29s"""', 'freq': '"""2s"""'}), "(start='5s', end='29s', freq='2s')\n", (2929, 2963), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((2971, 3030), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['without_base.index', 'exp_without_base'], {}), '(without_base.index, exp_without_base)\n', (2992, 3030), True, 'import pandas._testing as tm\n'), ((3036, 3089), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['with_base.index', 'exp_with_base'], {}), '(with_base.index, exp_with_base)\n', (3057, 3089), True, 'import pandas._testing as tm\n'), ((3663, 3702), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3684, 3702), True, 'import pandas._testing as tm\n'), ((3880, 3924), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""1 day"""', '"""6 day"""'], {'freq': '"""4D"""'}), "('1 day', '6 day', freq='4D')\n", (3895, 3924), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((3935, 3974), 'pandas.DataFrame', 'DataFrame', (["{'time': times}"], {'index': 'times'}), "({'time': times}, index=times)\n", (3944, 3974), False, 'from pandas import DataFrame, Series\n'), ((3991, 4035), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""1 day"""', '"""6 day"""'], {'freq': '"""2D"""'}), "('1 day', '6 day', freq='2D')\n", (4006, 4035), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((4047, 4088), 'pandas.Series', 'Series', (['times2'], {'index': 'times2', 'name': '"""time"""'}), "(times2, index=times2, name='time')\n", (4053, 4088), False, 'from pandas import DataFrame, Series\n'), ((4167, 4199), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (4189, 4199), True, 'import pandas._testing as tm\n'), ((4250, 4282), 'pandas._testing.assert_series_equal', 
'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (4272, 4282), True, 'import pandas._testing as tm\n'), ((4910, 4958), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', ([], {'start': 'start', 'end': 'end', 'freq': 'freq'}), '(start=start, end=end, freq=freq)\n', (4925, 4958), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((5075, 5132), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', ([], {'freq': 'resample_freq', 'start': 'start', 'end': 'end'}), '(freq=resample_freq, start=start, end=end)\n', (5090, 5132), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((5138, 5189), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result.index', 'expected_index'], {}), '(result.index, expected_index)\n', (5159, 5189), True, 'import pandas._testing as tm\n'), ((5932, 5971), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (5953, 5971), True, 'import pandas._testing as tm\n'), ((6522, 6561), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (6543, 6561), True, 'import pandas._testing as tm\n'), ((5259, 5279), 'numpy.isnan', 'np.isnan', (['result[-1]'], {}), '(result[-1])\n', (5267, 5279), True, 'import numpy as np\n'), ((5455, 5488), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10000, 4)'}), '(size=(10000, 4))\n', (5471, 5488), True, 'import numpy as np\n'), ((460, 506), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""0 day"""'], {'periods': '(4)', 'freq': '"""1T"""'}), "('0 day', periods=4, freq='1T')\n", (475, 506), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((819, 865), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""0 day"""'], {'periods': '(3)', 'freq': '"""1S"""'}), "('0 day', periods=3, freq='1S')\n", (834, 865), False, 'from 
pandas.core.indexes.timedeltas import timedelta_range\n'), ((1284, 1334), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""00:00:00"""', '"""00:10:00"""'], {'freq': '"""2T"""'}), "('00:00:00', '00:10:00', freq='2T')\n", (1299, 1334), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((1463, 1478), 'numpy.arange', 'np.arange', (['(1480)'], {}), '(1480)\n', (1472, 1478), True, 'import numpy as np\n'), ((1652, 1667), 'numpy.arange', 'np.arange', (['(1480)'], {}), '(1480)\n', (1661, 1667), True, 'import numpy as np\n'), ((2010, 2055), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""1 day"""'], {'freq': '"""s"""', 'periods': '(5)'}), "('1 day', freq='s', periods=5)\n", (2025, 2055), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((2134, 2180), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', (['"""1 day"""'], {'freq': '"""2s"""', 'periods': '(3)'}), "('1 day', freq='2s', periods=3)\n", (2149, 2180), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((3489, 3537), 'pandas.TimedeltaIndex', 'pd.TimedeltaIndex', (['[0, 10]'], {'unit': '"""s"""', 'freq': '"""10s"""'}), "([0, 10], unit='s', freq='10s')\n", (3506, 3537), True, 'import pandas as pd\n'), ((5505, 5564), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', ([], {'start': '"""0s"""', 'periods': '(10000)', 'freq': '"""3906250n"""'}), "(start='0s', periods=10000, freq='3906250n')\n", (5520, 5564), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((5833, 5883), 'pandas.core.indexes.timedeltas.timedelta_range', 'timedelta_range', ([], {'start': '"""1s"""', 'periods': '(13)', 'freq': '"""3s"""'}), "(start='1s', periods=13, freq='3s')\n", (5848, 5883), False, 'from pandas.core.indexes.timedeltas import timedelta_range\n'), ((6131, 6177), 'pandas.date_range', 'pd.date_range', (['"""20200101"""'], {'periods': '(4)', 'tz': '"""UTC"""'}), 
"('20200101', periods=4, tz='UTC')\n", (6144, 6177), True, 'import pandas as pd\n'), ((6451, 6508), 'pandas.date_range', 'pd.date_range', (['"""20200101"""'], {'periods': '(2)', 'tz': '"""UTC"""', 'freq': '"""2D"""'}), "('20200101', periods=2, tz='UTC', freq='2D')\n", (6464, 6508), True, 'import pandas as pd\n'), ((303, 314), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (312, 314), False, 'from datetime import timedelta\n'), ((316, 336), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (325, 336), False, 'from datetime import timedelta\n'), ((1692, 1707), 'numpy.arange', 'np.arange', (['(1480)'], {}), '(1480)\n', (1701, 1707), True, 'import numpy as np\n'), ((6090, 6102), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6099, 6102), True, 'import numpy as np\n'), ((6312, 6350), 'pandas.Timedelta', 'pd.Timedelta', (['"""0 days 00:00:00.990000"""'], {}), "('0 days 00:00:00.990000')\n", (6324, 6350), True, 'import pandas as pd\n'), ((6369, 6407), 'pandas.Timedelta', 'pd.Timedelta', (['"""0 days 00:00:02.990000"""'], {}), "('0 days 00:00:02.990000')\n", (6381, 6407), True, 'import pandas as pd\n'), ((676, 714), 'pandas.DataFrame', 'DataFrame', (["{'value': [2, 3, 5]}", 'index'], {}), "({'value': [2, 3, 5]}, index)\n", (685, 714), False, 'from pandas import DataFrame, Series\n')] |
"""
This class contains the code to encode/decode data using BB-ANS with a VAE
"""
from ans import ANSCoder
import numpy as np
import distributions
def BBANS_append(posterior_pop, likelihood_append, prior_append):
    """Build a BB-ANS append (encode) step.

    The returned closure pushes one symbol: it first pops latent bits via
    the posterior, then appends the data under the likelihood, and finally
    appends the latent under the prior.
    """
    def append(ans, data):
        z = posterior_pop(data)(ans)
        likelihood_append(z)(ans, data)
        prior_append(ans, z)

    return append
def BBANS_pop(prior_pop, likelihood_pop, posterior_append):
    """Build a BB-ANS pop (decode) step, the inverse of BBANS_append.

    The returned closure pops a latent under the prior, decodes the data
    under the likelihood, re-appends the latent under the posterior, and
    returns the decoded data.
    """
    def pop(ans):
        z = prior_pop(ans)
        symbol = likelihood_pop(z)(ans)
        posterior_append(symbol)(ans, z)
        return symbol

    return pop
def VAE_append(latent_shape, generative_model, recognition_model,
               obs_append, prior_precision, latent_precision):
    """
    Build a BB-ANS append function from the two halves of a variational
    autoencoder (recognition / generative networks), following the same
    layout as vae_append in the reference implementation.
    """
    def posterior_pop(data):
        """Pop latent indices under the recognition (posterior) network."""
        mean, stdd = recognition_model(data)
        mean, stdd = np.ravel(mean), np.ravel(stdd)
        # One discretised Gaussian per latent dimension.
        cdfs = [distributions.gaussian_latent_cdf(m, s, prior_precision, latent_precision)
                for m, s in zip(mean, stdd)]
        ppfs = [distributions.gaussian_latent_ppf(m, s, prior_precision, latent_precision)
                for m, s in zip(mean, stdd)]
        return distributions.distr_pop(latent_precision, ppfs, cdfs)

    def likelihood_append(latent_indices):
        """Append the observation under the generative (likelihood) network."""
        centers = distributions.standard_gaussian_centers(prior_precision)
        latents = np.reshape(centers[latent_indices], latent_shape)
        return obs_append(generative_model(latents))

    prior_append = distributions.uniforms_append(prior_precision)
    return BBANS_append(posterior_pop, likelihood_append, prior_append)
def VAE_pop(latent_shape, generative_model, recognition_model,
        obs_pop, prior_precision, latent_precision):
    """
    Pop a symbol using VAE BB-ANS.

    Mirrors VAE_append: pops the latent under the uniform prior, decodes
    the observation under the generative model, then re-appends the
    posterior bits so decoding exactly inverts encoding.
    """
    # Latents were appended under a uniform prior, one per latent dimension.
    prior_pop = distributions.uniforms_pop(prior_precision, np.prod(latent_shape))
    def likelihood_pop(latent_indices):
        # Map discrete latent indices back to Gaussian bin centers, reshape
        # to the model's expected latent shape, and decode the observation.
        y = distributions.standard_gaussian_centers(prior_precision)[latent_indices]
        obs_params = generative_model(np.reshape(y, latent_shape))
        return obs_pop(obs_params)
    def posterior_append(data):
        # Re-append the latent under the posterior q(z|data); np.atleast_2d
        # gives the recognition network a batched input.
        posterior_mean, posterior_stdd = recognition_model(np.atleast_2d(data))
        posterior_mean = np.ravel(posterior_mean)
        posterior_stdd = np.ravel(posterior_stdd)
        cdfs = [distributions.gaussian_latent_cdf(mean, stdd, prior_precision, latent_precision)
                for mean, stdd in zip(posterior_mean, posterior_stdd)]
        return distributions.distr_append(latent_precision, cdfs)
return BBANS_pop(prior_pop, likelihood_pop, posterior_append) | [
"numpy.prod",
"distributions.gaussian_latent_ppf",
"numpy.atleast_2d",
"numpy.reshape",
"distributions.uniforms_append",
"distributions.standard_gaussian_centers",
"distributions.distr_pop",
"numpy.ravel",
"distributions.distr_append",
"distributions.gaussian_latent_cdf"
] | [((2296, 2342), 'distributions.uniforms_append', 'distributions.uniforms_append', (['prior_precision'], {}), '(prior_precision)\n', (2325, 2342), False, 'import distributions\n'), ((1385, 1409), 'numpy.ravel', 'np.ravel', (['posterior_mean'], {}), '(posterior_mean)\n', (1393, 1409), True, 'import numpy as np\n'), ((1435, 1459), 'numpy.ravel', 'np.ravel', (['posterior_stdd'], {}), '(posterior_stdd)\n', (1443, 1459), True, 'import numpy as np\n'), ((1926, 1979), 'distributions.distr_pop', 'distributions.distr_pop', (['latent_precision', 'ppfs', 'cdfs'], {}), '(latent_precision, ppfs, cdfs)\n', (1949, 1979), False, 'import distributions\n'), ((2646, 2667), 'numpy.prod', 'np.prod', (['latent_shape'], {}), '(latent_shape)\n', (2653, 2667), True, 'import numpy as np\n'), ((3035, 3059), 'numpy.ravel', 'np.ravel', (['posterior_mean'], {}), '(posterior_mean)\n', (3043, 3059), True, 'import numpy as np\n'), ((3085, 3109), 'numpy.ravel', 'np.ravel', (['posterior_stdd'], {}), '(posterior_stdd)\n', (3093, 3109), True, 'import numpy as np\n'), ((3293, 3343), 'distributions.distr_append', 'distributions.distr_append', (['latent_precision', 'cdfs'], {}), '(latent_precision, cdfs)\n', (3319, 3343), False, 'import distributions\n'), ((1588, 1673), 'distributions.gaussian_latent_cdf', 'distributions.gaussian_latent_cdf', (['mean', 'stdd', 'prior_precision', 'latent_precision'], {}), '(mean, stdd, prior_precision, latent_precision\n )\n', (1621, 1673), False, 'import distributions\n'), ((1758, 1843), 'distributions.gaussian_latent_ppf', 'distributions.gaussian_latent_ppf', (['mean', 'stdd', 'prior_precision', 'latent_precision'], {}), '(mean, stdd, prior_precision, latent_precision\n )\n', (1791, 1843), False, 'import distributions\n'), ((2090, 2146), 'distributions.standard_gaussian_centers', 'distributions.standard_gaussian_centers', (['prior_precision'], {}), '(prior_precision)\n', (2129, 2146), False, 'import distributions\n'), ((2205, 2232), 'numpy.reshape', 'np.reshape', 
(['y', 'latent_shape'], {}), '(y, latent_shape)\n', (2215, 2232), True, 'import numpy as np\n'), ((2722, 2778), 'distributions.standard_gaussian_centers', 'distributions.standard_gaussian_centers', (['prior_precision'], {}), '(prior_precision)\n', (2761, 2778), False, 'import distributions\n'), ((2833, 2860), 'numpy.reshape', 'np.reshape', (['y', 'latent_shape'], {}), '(y, latent_shape)\n', (2843, 2860), True, 'import numpy as np\n'), ((2989, 3008), 'numpy.atleast_2d', 'np.atleast_2d', (['data'], {}), '(data)\n', (3002, 3008), True, 'import numpy as np\n'), ((3126, 3211), 'distributions.gaussian_latent_cdf', 'distributions.gaussian_latent_cdf', (['mean', 'stdd', 'prior_precision', 'latent_precision'], {}), '(mean, stdd, prior_precision, latent_precision\n )\n', (3159, 3211), False, 'import distributions\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from tqdm import tqdm
import numpy as np
from milvus_util import VecToMilvus
def vector_insert(file_path):
    """Load precomputed embeddings from a ``.npy`` file and insert them into
    the Milvus ``faq_finance`` collection in batches.

    Parameters
    ----------
    file_path : str
        Path to a NumPy ``.npy`` file of shape (num_vectors, dim).

    Returns
    -------
    None
    """
    embeddings = np.load(file_path)
    print(embeddings.shape)
    # Sequential ids, one per embedding row.
    embedding_ids = list(range(embeddings.shape[0]))
    print(len(embedding_ids))
    client = VecToMilvus()
    collection_name = 'faq_finance'
    partition_tag = 'partition_1'
    data_size = len(embedding_ids)
    batch_size = 100000
    for start in tqdm(range(0, data_size, batch_size)):
        # One consistent end bound for both the vectors and their ids
        # (the original clamped the array index but sliced ids with
        # start + batch_size; the two were only equivalent by accident).
        end = min(start + batch_size, data_size)
        batch_emb = embeddings[start:end]
        client.insert(
            collection_name=collection_name,
            vectors=batch_emb.tolist(),
            ids=embedding_ids[start:end],
            partition_tag=partition_tag)
if __name__ == "__main__":
    # Script entry point: push the precomputed corpus embeddings into Milvus.
    file_path = 'corpus_embedding.npy'
    vector_insert(file_path)
| [
"numpy.load",
"milvus_util.VecToMilvus",
"numpy.arange"
] | [((753, 771), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (760, 771), True, 'import numpy as np\n'), ((903, 916), 'milvus_util.VecToMilvus', 'VecToMilvus', ([], {}), '()\n', (914, 916), False, 'from milvus_util import VecToMilvus\n'), ((1228, 1249), 'numpy.arange', 'np.arange', (['i', 'cur_end'], {}), '(i, cur_end)\n', (1237, 1249), True, 'import numpy as np\n')] |
"""
=======================================
Receiver Operating Characteristic Curve
=======================================
Example of plotting the ROC curve for a classification task.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklvq import GMLVQ
# Use small tick labels on both axes of the figure below.
matplotlib.rc("xtick", labelsize="small")
matplotlib.rc("ytick", labelsize="small")
# Binary classification data from sklearn's breast-cancer set.
data, labels = load_breast_cancer(return_X_y=True)
###############################################################################
# Create a GMLVQ object and pass it a distance function, activation function and solver. See the
# API reference under documentation for defaults.
model = GMLVQ(
    distance_type="adaptive-squared-euclidean",
    activation_type="swish",
    activation_params={"beta": 2},
    solver_type="waypoint-gradient-descent",
    solver_params={"max_runs": 10, "k": 3, "step_size": np.array([0.1, 0.05])},
    random_state=31415,
)
###############################################################################
# Fit the GMLVQ object to the data and plot the roc curve.
# Object to perform z-transform
scaler = StandardScaler()
# Compute (fit) and apply (transform) z-transform
data = scaler.fit_transform(data)
# Train the model using the scaled X and true labels
model.fit(data, labels)
# Get the decision values (which are used in predict) instead of the labels. The values are with
# respect to the "greater" class, i.e., index 1.
label_score = model.decision_function(data)
# roc_curve expects the y_score to be with respect to the positive class.
fpr, tpr, thresholds = roc_curve(
    y_true=labels, y_score=label_score, pos_label=1, drop_intermediate=True
)
roc_auc = roc_auc_score(y_true=labels, y_score=label_score)
# Sometimes it is good to know where the Nearest prototype classifier is on this curve. This can
# be computed using the confusion matrix function from sklearn.
tn, fp, fn, tp = confusion_matrix(y_true=labels, y_pred=model.predict(data)).ravel()
# The tpr and fpr of the npc are then given by:
npc_tpr = tp / (tp + fn)
npc_fpr = fp / (fp + tn)
fig, ax = plt.subplots()
fig.suptitle("Receiver operating characteristic ")
# Plot the ROC curve
ax.plot(fpr, tpr, color="darkorange", lw=2, label="ROC AUC = {:.3f}".format(roc_auc))
# Plot the random line
ax.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
# Plot the NPC classifier
ax.plot(npc_fpr, npc_tpr, color="green", marker="o", markersize="12")
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.legend(loc="lower right")
ax.grid(False)
| [
"sklearn.datasets.load_breast_cancer",
"sklearn.metrics.roc_auc_score",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"sklearn.metrics.roc_curve",
"matplotlib.rc",
"matplotlib.pyplot.subplots"
] | [((454, 495), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': '"""small"""'}), "('xtick', labelsize='small')\n", (467, 495), False, 'import matplotlib\n'), ((496, 537), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': '"""small"""'}), "('ytick', labelsize='small')\n", (509, 537), False, 'import matplotlib\n'), ((554, 589), 'sklearn.datasets.load_breast_cancer', 'load_breast_cancer', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (572, 589), False, 'from sklearn.datasets import load_breast_cancer\n'), ((1279, 1295), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1293, 1295), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1748, 1834), 'sklearn.metrics.roc_curve', 'roc_curve', ([], {'y_true': 'labels', 'y_score': 'label_score', 'pos_label': '(1)', 'drop_intermediate': '(True)'}), '(y_true=labels, y_score=label_score, pos_label=1,\n drop_intermediate=True)\n', (1757, 1834), False, 'from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix\n'), ((1847, 1896), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ([], {'y_true': 'labels', 'y_score': 'label_score'}), '(y_true=labels, y_score=label_score)\n', (1860, 1896), False, 'from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix\n'), ((2256, 2270), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2268, 2270), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1068), 'numpy.array', 'np.array', (['[0.1, 0.05]'], {}), '([0.1, 0.05])\n', (1055, 1068), True, 'import numpy as np\n')] |
'''
This is the net model of Environmental features recognition for lower limb prostheses toward predictive walking.
If you think this code is useful, please cite:
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“Environmental features recognition for lower limb prostheses toward predictive walking,”
IEEE Transactions on Neural Systems and Rehabilitation Engineering, vol. 27, no. 3, pp. 465–476, Mar. 2019.
'''
from __future__ import print_function
import keras
import glob
import numpy as np
import cv2
import tensorflow as tf
from sklearn.model_selection import train_test_split
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from zipfile import ZipFile
def load_data(path='data/', num_classes=6, image_shape=(100, 100, 1)):
    """Read per-class PNG folders under ``path`` into (X, y) arrays.

    If no images are found, ``data.zip`` is extracted first. Class folders
    are named ``1`` .. ``num_classes``; labels are one-hot encoded and the
    data is split 80/20 into train/test with a fixed random seed.
    """
    found = glob.glob(path + '/*/*.png')
    if not found:
        # Dataset not extracted yet -> unpack the archive in place.
        with ZipFile('data.zip', 'r') as zipObj:
            zipObj.extractall()
        found = glob.glob(path + '/*/*.png')
    sample_count = len(found)
    X = np.zeros((sample_count,) + image_shape)
    y = np.zeros(sample_count)
    cursor = 0
    for label in range(num_classes):
        # Folder names start at 1, labels at 0.
        for img_path in glob.glob(path + str(label + 1) + '/*.png'):
            raw = cv2.imread(img_path, -1)
            X[cursor, ...] = np.reshape(raw, image_shape)
            y[cursor] = label
            cursor += 1
    y = keras.utils.to_categorical(y, num_classes=num_classes)
    print(X.shape, y.shape)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=1)
    return (X_train, y_train), (X_test, y_test)
def stats_graph(graph):
    """Print total FLOPs and trainable-parameter counts for a TF graph."""
    builder = tf.profiler.ProfileOptionBuilder
    flops = tf.profiler.profile(graph, options=builder.float_operation())
    params = tf.profiler.profile(graph, options=builder.trainable_variables_parameter())
    print('FLOPs: {}; Trainable params: {}'.format(flops.total_float_ops, params.total_parameters))
def calc_net_size():
    """Report FLOPs/params of the graph behind the current Keras session."""
    stats_graph(K.get_session().graph)
# Training hyper-parameters.
batch_size = 128
num_classes = 5
epochs = 30
image_shape = (100, 100, 1)
# input image dimensions
img_rows, img_cols = image_shape[0], image_shape[1]
(x_train, y_train), (x_test, y_test) = load_data(image_shape = image_shape,
                                                 num_classes=num_classes)
# Reorder the channel axis to match the active Keras backend convention.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale 8-bit pixel values into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# deep CNN: three conv blocks with batch-norm, then two dense layers.
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Keep only the checkpoint with the best validation accuracy.
model_path = 'checkpoint/best_model.h5'
checkpoint = ModelCheckpoint(model_path,
                             verbose=1, monitor='val_acc',
                             save_best_only=True, mode='auto')
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
is_train = True
if is_train:
    # 25% of the training split is held out for validation.
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_split=0.25, callbacks=[checkpoint])
# load the best model
# model.load_weights(model_path)
# calc_net_size()
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| [
"keras.layers.Conv2D",
"zipfile.ZipFile",
"keras.utils.to_categorical",
"keras.layers.Dense",
"tensorflow.profiler.ProfileOptionBuilder.trainable_variables_parameter",
"keras.backend.image_data_format",
"numpy.reshape",
"glob.glob",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.layers.... | [((3292, 3304), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3302, 3304), False, 'from keras.models import Sequential\n'), ((4009, 4105), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_path'], {'verbose': '(1)', 'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'mode': '"""auto"""'}), "(model_path, verbose=1, monitor='val_acc', save_best_only=\n True, mode='auto')\n", (4024, 4105), False, 'from keras.callbacks import ModelCheckpoint\n'), ((987, 1015), 'glob.glob', 'glob.glob', (["(path + '/*/*.png')"], {}), "(path + '/*/*.png')\n", (996, 1015), False, 'import glob\n'), ((1279, 1314), 'numpy.zeros', 'np.zeros', (['((file_num,) + image_shape)'], {}), '((file_num,) + image_shape)\n', (1287, 1314), True, 'import numpy as np\n'), ((1323, 1341), 'numpy.zeros', 'np.zeros', (['file_num'], {}), '(file_num)\n', (1331, 1341), True, 'import numpy as np\n'), ((1613, 1667), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y'], {'num_classes': 'num_classes'}), '(y, num_classes=num_classes)\n', (1639, 1667), False, 'import keras\n'), ((1735, 1788), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, y, test_size=0.2, random_state=1)\n', (1751, 1788), False, 'from sklearn.model_selection import train_test_split\n'), ((2269, 2284), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2282, 2284), True, 'from keras import backend as K\n'), ((2642, 2663), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (2661, 2663), True, 'from keras import backend as K\n'), ((3315, 3389), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (3321, 3389), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3435, 3455), 
'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3453, 3455), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3467, 3497), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3479, 3497), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3509, 3584), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(128, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (3515, 3584), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3630, 3650), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3648, 3650), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3662, 3692), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3674, 3692), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3704, 3742), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), activation='relu')\n", (3710, 3742), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3754, 3774), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3772, 3774), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n'), ((3786, 3798), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (3793, 3798), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3810, 3819), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3817, 3819), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3831, 3860), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (3836, 3860), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3872, 
3900), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (3877, 3900), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((3912, 3952), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (3917, 3952), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1213, 1241), 'glob.glob', 'glob.glob', (["(path + '/*/*.png')"], {}), "(path + '/*/*.png')\n", (1222, 1241), False, 'import glob\n'), ((4243, 4266), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (4264, 4266), False, 'import keras\n'), ((1056, 1080), 'zipfile.ZipFile', 'ZipFile', (['"""data.zip"""', '"""r"""'], {}), "('data.zip', 'r')\n", (1063, 1080), False, 'from zipfile import ZipFile\n'), ((1464, 1484), 'cv2.imread', 'cv2.imread', (['file', '(-1)'], {}), '(file, -1)\n', (1474, 1484), False, 'import cv2\n'), ((1503, 1531), 'numpy.reshape', 'np.reshape', (['img', 'image_shape'], {}), '(img, image_shape)\n', (1513, 1531), True, 'import numpy as np\n'), ((1967, 2017), 'tensorflow.profiler.ProfileOptionBuilder.float_operation', 'tf.profiler.ProfileOptionBuilder.float_operation', ([], {}), '()\n', (2015, 2017), True, 'import tensorflow as tf\n'), ((2067, 2131), 'tensorflow.profiler.ProfileOptionBuilder.trainable_variables_parameter', 'tf.profiler.ProfileOptionBuilder.trainable_variables_parameter', ([], {}), '()\n', (2129, 2131), True, 'import tensorflow as tf\n')] |
import sys
import h5py
import torch
from torch import nn
from torch import cuda
import string
import re
from collections import Counter
import numpy as np
def to_device(x, gpuid):
    """Move tensor ``x`` to CPU when ``gpuid`` is -1, otherwise to that GPU."""
    if gpuid == -1:
        return x.cpu()
    # NOTE(review): this compares a torch.device against an int ordinal --
    # confirm the comparison behaves as intended for already-placed tensors.
    return x.cuda(gpuid) if x.device != gpuid else x
def has_nan(t):
    """Return True if tensor ``t`` contains at least one NaN.

    Bug fix: the previous implementation tested ``isnan(t).sum() == 1``, so a
    tensor holding two or more NaNs was incorrectly reported as NaN-free.
    """
    return bool(torch.isnan(t).any())
def tensor_on_dev(t, is_cuda):
    """Return ``t`` on the GPU when ``is_cuda`` is truthy, else unchanged."""
    return t.cuda() if is_cuda else t
def pick_label(dist):
    """Return the index of the highest score in each row of ``dist``."""
    return np.asarray(dist).argmax(axis=1)
def torch2np(t, is_cuda):
    """Convert a torch tensor to numpy, copying it off the GPU first if needed."""
    if is_cuda:
        return t.cpu().numpy()
    return t.numpy()
def save_opt(opt, path):
    """Write the printable form of ``opt`` to the text file at ``path``."""
    with open(path, 'w') as out_file:
        out_file.write('{0}'.format(opt))
def load_param_dict(path):
    """Open the HDF5 parameter file at ``path`` for reading.

    TODO: this hands the open file handle to the caller; consider wrapping
    it in a context manager instead.
    """
    return h5py.File(path, 'r')
def save_param_dict(param_dict, path):
    """Dump each named parameter array of ``param_dict`` into an HDF5 file."""
    out = h5py.File(path, 'w')
    for name, values in param_dict.items():
        out.create_dataset(name, data=values)
    out.close()
def load_dict(path):
    """Read a vocabulary file of ``word index count`` lines into {index: word}.

    Blank lines are skipped; the count column is ignored.
    """
    mapping = {}
    with open(path, 'r+') as handle:
        for line in handle:
            stripped = line.strip()
            if not stripped:
                continue
            word, idx, _count = stripped.split()
            mapping[int(idx)] = word
    return mapping
def rand_tensor(shape, r1, r2):
    """Sample a tensor of ``shape`` uniformly from the interval between r2 and r1."""
    return r2 + (r1 - r2) * torch.rand(shape)
def build_rnn(type, input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional):
    """Construct an LSTM or GRU with the given configuration.

    ``type`` must be 'lstm' or 'gru'; any other value trips the assertion,
    matching the original behaviour.
    """
    rnn_classes = {'lstm': nn.LSTM, 'gru': nn.GRU}
    assert type in rnn_classes
    return rnn_classes[type](input_size=input_size,
                             hidden_size=hidden_size,
                             num_layers=num_layers,
                             bias=bias,
                             batch_first=batch_first,
                             dropout=dropout,
                             bidirectional=bidirectional)
if __name__ == '__main__':
pass | [
"torch.nn.LSTM",
"numpy.argmax",
"h5py.File",
"torch.isnan",
"torch.rand",
"torch.nn.GRU"
] | [((434, 457), 'numpy.argmax', 'np.argmax', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (443, 457), True, 'import numpy as np\n'), ((678, 698), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (687, 698), False, 'import h5py\n'), ((758, 778), 'h5py.File', 'h5py.File', (['path', '"""w"""'], {}), "(path, 'w')\n", (767, 778), False, 'import h5py\n'), ((1252, 1421), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'bias': 'bias', 'batch_first': 'batch_first', 'dropout': 'dropout', 'bidirectional': 'bidirectional'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, bias=bias, batch_first=batch_first, dropout=dropout,\n bidirectional=bidirectional)\n', (1259, 1421), False, 'from torch import nn\n'), ((1097, 1114), 'torch.rand', 'torch.rand', (['shape'], {}), '(shape)\n', (1107, 1114), False, 'import torch\n'), ((1461, 1629), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': 'input_size', 'hidden_size': 'hidden_size', 'num_layers': 'num_layers', 'bias': 'bias', 'batch_first': 'batch_first', 'dropout': 'dropout', 'bidirectional': 'bidirectional'}), '(input_size=input_size, hidden_size=hidden_size, num_layers=\n num_layers, bias=bias, batch_first=batch_first, dropout=dropout,\n bidirectional=bidirectional)\n', (1467, 1629), False, 'from torch import nn\n'), ((296, 310), 'torch.isnan', 'torch.isnan', (['t'], {}), '(t)\n', (307, 310), False, 'import torch\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions exclusively useful in colab notebooks."""
import matplotlib.pyplot as plt
import numpy as np
from tensorflow_graphics.notebooks import mesh_viewer as threejs_viz
# Previously:
# default view_dir was (0.5, 0.5, 0.0)
# bottom view_dir was (-0.5, -0.5, 0.0
# back view_dir was (-0.5, 0.0, 0.0)
# side view_dir was (0.0, 0.0, 0.5)
def trimesh_to_shape(mesh):
  """Converts a trimesh to a shape dict.

  Args:
    mesh: a trimesh-like object exposing `vertices`, `faces` and `visual`.

  Returns:
    Dict with 'vertices' and 'faces', plus 'vertex_colors' (RGB scaled to
    [0, 1]) when the mesh carries per-vertex colors.

  Raises:
    ValueError: if `mesh` is falsy.
  """
  if not mesh:
    raise ValueError('Cannot convert an empty trimesh.')
  shape = {'vertices': mesh.vertices, 'faces': mesh.faces}
  if mesh.visual.kind == 'vertex' and mesh.visual.vertex_colors.any():
    # Fix: the deprecated `np.float` alias was removed in NumPy 1.24; use the
    # concrete float64 dtype it always aliased.
    shape['vertex_colors'] = np.array(
        mesh.visual.vertex_colors[:, :3], dtype=np.float64) / 255.0
  return shape
def show(mesh, res=256):
  """Build a three.js viewer for `mesh`; `res` is accepted but unused."""
  del res
  return threejs_viz.Viewer(mesh)
def plot(im):
  """Display `im` (with singleton axes squeezed out), hiding grid and axes."""
  plt.imshow(np.squeeze(im))
  plt.grid(b=None)
  plt.axis('off')
def plot_all(ims):
  """Show several images side by side as a single horizontal strip."""
  plot(np.concatenate(ims, axis=1))
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.grid",
"numpy.squeeze",
"numpy.array",
"tensorflow_graphics.notebooks.mesh_viewer.Viewer",
"numpy.concatenate",
"matplotlib.pyplot.axis"
] | [((1425, 1449), 'tensorflow_graphics.notebooks.mesh_viewer.Viewer', 'threejs_viz.Viewer', (['mesh'], {}), '(mesh)\n', (1443, 1449), True, 'from tensorflow_graphics.notebooks import mesh_viewer as threejs_viz\n'), ((1511, 1525), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (1521, 1525), True, 'import numpy as np\n'), ((1529, 1543), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (1539, 1543), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1563), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': 'None'}), '(b=None)\n', (1555, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1567, 1582), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1575, 1582), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1642), 'numpy.concatenate', 'np.concatenate', (['ims'], {'axis': '(1)'}), '(ims, axis=1)\n', (1629, 1642), True, 'import numpy as np\n'), ((1284, 1342), 'numpy.array', 'np.array', (['mesh.visual.vertex_colors[:, :3]'], {'dtype': 'np.float'}), '(mesh.visual.vertex_colors[:, :3], dtype=np.float)\n', (1292, 1342), True, 'import numpy as np\n')] |
import numpy as np
import pyximport
pyximport.install()
from .cython_nms.cpu_nms import greedy_nms, soft_nms
def cython_soft_nms_wrapper(thresh, sigma=0.5, score_thresh=0.001, method='linear'):
    """Return a soft-NMS callable backed by the Cython implementation.

    ``method`` selects the score-decay scheme ('hard', 'linear' or 'gaussian').
    """
    methods = {'hard': 0, 'linear': 1, 'gaussian': 2}
    assert method in methods, 'Unknown soft_nms method: {}'.format(method)
    # Resolve the method code once; it does not change between calls.
    method_code = np.uint8(methods[method])
    def _nms(dets):
        kept, _ = soft_nms(
            np.ascontiguousarray(dets, dtype=np.float32),
            np.float32(sigma),
            np.float32(thresh),
            np.float32(score_thresh),
            method_code)
        return kept
    return _nms
def py_nms_wrapper(thresh):
    """Bind ``thresh`` to the pure-python greedy NMS and return the callable."""
    return lambda dets: nms(dets, thresh)
def cpu_nms_wrapper(thresh):
    """Bind ``thresh`` to the Cython greedy NMS; the callable yields kept boxes."""
    return lambda dets: greedy_nms(dets, thresh)[0]
def wnms_wrapper(thresh_lo, thresh_hi):
    """Bind the low/high IoU thresholds to weighted NMS and return the callable."""
    return lambda dets: py_weighted_nms(dets, thresh_lo, thresh_hi)
def nms(dets, thresh):
    """
    Greedy non-maximum suppression in pure numpy.

    Repeatedly keeps the highest-scoring box and discards every remaining box
    whose IoU with it exceeds ``thresh``.
    :param dets: array of rows [x1, y1, x2, y2, score]
    :param thresh: retain overlap < thresh
    :return: the surviving rows of ``dets``
    """
    xs1, ys1 = dets[:, 0], dets[:, 1]
    xs2, ys2 = dets[:, 2], dets[:, 3]
    # +1 matches the original inclusive-pixel area convention.
    box_areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    remaining = dets[:, 4].argsort()[::-1]
    survivors = []
    while remaining.size > 0:
        best = remaining[0]
        survivors.append(best)
        rest = remaining[1:]
        ix1 = np.maximum(xs1[best], xs1[rest])
        iy1 = np.maximum(ys1[best], ys1[rest])
        ix2 = np.minimum(xs2[best], xs2[rest])
        iy2 = np.minimum(ys2[best], ys2[rest])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (box_areas[best] + box_areas[rest] - inter)
        remaining = rest[iou <= thresh]
    return dets[survivors, :]
def py_weighted_nms(dets, thresh_lo, thresh_hi):
    """
    Weighted ("box voting") non-maximum suppression.

    Boxes overlapping the current best by more than ``thresh_hi`` vote on the
    kept coordinates via a score-weighted average; boxes overlapping by at
    most ``thresh_lo`` survive to the next round; everything in between is
    dropped.
    :param dets: array of rows [x1, y1, x2, y2, score]
    :param thresh_lo: retain overlap <= thresh_lo
    :param thresh_hi: vote overlap > thresh_hi
    :return: array of voted boxes [x1, y1, x2, y2, score]
    """
    xs1, ys1 = dets[:, 0], dets[:, 1]
    xs2, ys2 = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    box_areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    remaining = scores.argsort()[::-1]
    voted = []
    while remaining.size > 0:
        best = remaining[0]
        # Overlap of the current best box with everything still in play,
        # including itself (self-IoU is 1, so the best box always votes
        # whenever thresh_hi < 1).
        ix1 = np.maximum(xs1[best], xs1[remaining])
        iy1 = np.maximum(ys1[best], ys1[remaining])
        ix2 = np.minimum(xs2[best], xs2[remaining])
        iy2 = np.minimum(ys2[best], ys2[remaining])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        iou = inter / (box_areas[best] + box_areas[remaining] - inter)
        voters = remaining[iou > thresh_hi]
        if voters.size == 0:
            # No box clears the voting threshold (only possible when
            # thresh_hi >= 1): stop early, mirroring the original behaviour.
            break
        weights = scores[voters]
        total = np.sum(weights)
        voted.append([np.sum(weights * xs1[voters]) / total,
                      np.sum(weights * ys1[voters]) / total,
                      np.sum(weights * xs2[voters]) / total,
                      np.sum(weights * ys2[voters]) / total,
                      scores[best]])
        remaining = remaining[iou <= thresh_lo]
    return np.array(voted)
| [
"numpy.uint8",
"numpy.minimum",
"numpy.where",
"numpy.ascontiguousarray",
"numpy.array",
"pyximport.install",
"numpy.sum",
"numpy.maximum",
"numpy.float32"
] | [((36, 55), 'pyximport.install', 'pyximport.install', ([], {}), '()\n', (53, 55), False, 'import pyximport\n'), ((3521, 3535), 'numpy.array', 'np.array', (['keep'], {}), '(keep)\n', (3529, 3535), True, 'import numpy as np\n'), ((1550, 1582), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (1560, 1582), True, 'import numpy as np\n'), ((1597, 1629), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (1607, 1629), True, 'import numpy as np\n'), ((1644, 1676), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (1654, 1676), True, 'import numpy as np\n'), ((1691, 1723), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (1701, 1723), True, 'import numpy as np\n'), ((1737, 1767), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (1747, 1767), True, 'import numpy as np\n'), ((1780, 1810), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (1790, 1810), True, 'import numpy as np\n'), ((2601, 2629), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order]'], {}), '(x1[i], x1[order])\n', (2611, 2629), True, 'import numpy as np\n'), ((2644, 2672), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order]'], {}), '(y1[i], y1[order])\n', (2654, 2672), True, 'import numpy as np\n'), ((2687, 2715), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order]'], {}), '(x2[i], x2[order])\n', (2697, 2715), True, 'import numpy as np\n'), ((2730, 2758), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order]'], {}), '(y2[i], y2[order])\n', (2740, 2758), True, 'import numpy as np\n'), ((2772, 2802), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (2782, 2802), True, 'import numpy as np\n'), ((2815, 2845), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (2825, 
2845), True, 'import numpy as np\n'), ((3121, 3147), 'numpy.sum', 'np.sum', (['scores[order_keep]'], {}), '(scores[order_keep])\n', (3127, 3147), True, 'import numpy as np\n'), ((393, 437), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dets'], {'dtype': 'np.float32'}), '(dets, dtype=np.float32)\n', (413, 437), True, 'import numpy as np\n'), ((459, 476), 'numpy.float32', 'np.float32', (['sigma'], {}), '(sigma)\n', (469, 476), True, 'import numpy as np\n'), ((498, 516), 'numpy.float32', 'np.float32', (['thresh'], {}), '(thresh)\n', (508, 516), True, 'import numpy as np\n'), ((538, 562), 'numpy.float32', 'np.float32', (['score_thresh'], {}), '(score_thresh)\n', (548, 562), True, 'import numpy as np\n'), ((584, 609), 'numpy.uint8', 'np.uint8', (['methods[method]'], {}), '(methods[method])\n', (592, 609), True, 'import numpy as np\n'), ((1909, 1932), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (1917, 1932), True, 'import numpy as np\n'), ((2940, 2966), 'numpy.where', 'np.where', (['(ovr <= thresh_lo)'], {}), '(ovr <= thresh_lo)\n', (2948, 2966), True, 'import numpy as np\n'), ((2990, 3015), 'numpy.where', 'np.where', (['(ovr > thresh_hi)'], {}), '(ovr > thresh_hi)\n', (2998, 3015), True, 'import numpy as np\n'), ((3165, 3208), 'numpy.sum', 'np.sum', (['(scores[order_keep] * x1[order_keep])'], {}), '(scores[order_keep] * x1[order_keep])\n', (3171, 3208), True, 'import numpy as np\n'), ((3232, 3275), 'numpy.sum', 'np.sum', (['(scores[order_keep] * y1[order_keep])'], {}), '(scores[order_keep] * y1[order_keep])\n', (3238, 3275), True, 'import numpy as np\n'), ((3299, 3342), 'numpy.sum', 'np.sum', (['(scores[order_keep] * x2[order_keep])'], {}), '(scores[order_keep] * x2[order_keep])\n', (3305, 3342), True, 'import numpy as np\n'), ((3366, 3409), 'numpy.sum', 'np.sum', (['(scores[order_keep] * y2[order_keep])'], {}), '(scores[order_keep] * y2[order_keep])\n', (3372, 3409), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import time
# NOTE(review): `np.flush` is not part of stock NumPy; the guarded call
# presumably targets a patched/accelerated build -- confirm before relying
# on it. On stock NumPy the guard is False and the call is skipped.
if "flush" in dir(np):
    np.flush()
# Time summing 50M random floats.
begin = time.time()
#a = np.sum(((np.ones(100)+1.0)*2.0)/2.0)
a = np.sum(np.random.random(50000000))
#a = np.multiply.accumulate(np.ones((8,8), dtype=np.float32))
print(a)
if "flush" in dir(np):
    np.flush()
end = time.time() - begin
print(end)
| [
"numpy.random.random",
"numpy.flush",
"time.time"
] | [((100, 111), 'time.time', 'time.time', ([], {}), '()\n', (109, 111), False, 'import time\n'), ((81, 91), 'numpy.flush', 'np.flush', ([], {}), '()\n', (89, 91), True, 'import numpy as np\n'), ((166, 192), 'numpy.random.random', 'np.random.random', (['(50000000)'], {}), '(50000000)\n', (182, 192), True, 'import numpy as np\n'), ((293, 303), 'numpy.flush', 'np.flush', ([], {}), '()\n', (301, 303), True, 'import numpy as np\n'), ((311, 322), 'time.time', 'time.time', ([], {}), '()\n', (320, 322), False, 'import time\n')] |
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import numpy as np
from embedding_net.models import EmbeddingNet, TripletNet, SiameseNet
from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from embedding_net.datagenerators import ENDataLoader, SimpleDataGenerator, TripletsDataGenerator, SimpleTripletsDataGenerator, SiameseDataGenerator
from embedding_net.utils import parse_params, plot_grapths
from embedding_net.backbones import pretrain_backbone_softmax
from embedding_net.losses_and_accuracies import contrastive_loss, triplet_loss, accuracy
import argparse
from tensorflow import keras
from tensorflow.keras.utils import multi_gpu_model
import tensorflow as tf
def parse_args():
    """Build and evaluate the command-line interface for training.

    Positional ``config`` is required; ``--resume_from`` is optional.
    """
    parser = argparse.ArgumentParser(description='Train a classificator')
    parser.add_argument('config', help='model config file path')
    parser.add_argument('--resume_from', help='the checkpoint file to resume from')
    return parser.parse_args()
def create_save_folders(params):
    """Create the project working tree (weights, encodings, plots, tf logs).

    Args:
        params: dict with 'work_dir' and 'project_name' keys.

    Returns:
        Tuple of (tensorboard log dir, checkpoint filename template,
        plots directory).
    """
    work_dir_path = os.path.join(params['work_dir'], params['project_name'])
    weights_save_path = os.path.join(work_dir_path, 'weights/')
    weights_pretrained_save_path = os.path.join(work_dir_path, 'pretraining_model/weights/')
    encodings_save_path = os.path.join(work_dir_path, 'encodings/')
    plots_save_path = os.path.join(work_dir_path, 'plots/')
    tensorboard_save_path = os.path.join(work_dir_path, 'tf_log/')
    tensorboard_pretrained_save_path = os.path.join(work_dir_path, 'pretraining_model/tf_log/')
    # Keras fills in the epoch number of the best checkpoint at save time.
    weights_save_file_path = os.path.join(weights_save_path, 'epoch_{epoch:03d}' + '.hdf5')
    os.makedirs(work_dir_path, exist_ok=True)
    os.makedirs(weights_save_path, exist_ok=True)
    os.makedirs(weights_pretrained_save_path, exist_ok=True)
    os.makedirs(encodings_save_path, exist_ok=True)
    os.makedirs(plots_save_path, exist_ok=True)
    # Fix: the main-run TensorBoard directory is returned to the caller but
    # was never created (only the pretraining tf_log dir was made).
    os.makedirs(tensorboard_save_path, exist_ok=True)
    os.makedirs(tensorboard_pretrained_save_path, exist_ok=True)
    return tensorboard_save_path, weights_save_file_path, plots_save_path
def main():
    """Train a siamese or triplet embedding network from a YAML-style config.

    Reads the config path from the CLI (``parse_args``), builds the data
    loader, generators, callbacks and model, optionally pretrains the
    backbone with a softmax head, then runs ``fit_generator``.
    """
    print('LOAD PARAMETERS')
    args = parse_args()
    cfg_params = parse_params(args.config)
    params_train = cfg_params['train']
    params_model = cfg_params['model']
    params_dataloader = cfg_params['dataloader']
    params_generator = cfg_params['generator']
    tensorboard_save_path, weights_save_file_path, plots_save_path = create_save_folders(cfg_params['general'])
    # NOTE(review): work_dir_path and weights_save_path below are recomputed
    # but never used in this function (create_save_folders already builds
    # them) — candidates for removal; confirm no side effect is intended.
    work_dir_path = os.path.join(cfg_params['general']['work_dir'],
                                 cfg_params['general']['project_name'])
    weights_save_path = os.path.join(work_dir_path, 'weights/')
    initial_lr = params_train['learning_rate']
    decay_factor = params_train['decay_factor']
    step_size = params_train['step_size']
    # Monitor validation loss only when a validation split exists.
    if params_dataloader['validate']:
        callback_monitor = 'val_loss'
    else:
        callback_monitor = 'loss'
    print('LOADING COMPLETED')
    callbacks = [
        # Step decay: lr = initial_lr * decay_factor ** floor(epoch / step_size)
        LearningRateScheduler(lambda x: initial_lr *
                              decay_factor ** np.floor(x/step_size)),
        ReduceLROnPlateau(monitor=callback_monitor, factor=0.1,
                          patience=4, verbose=1),
        EarlyStopping(monitor=callback_monitor,
                      patience=10,
                      verbose=1),
        ModelCheckpoint(filepath=weights_save_file_path,
                        monitor=callback_monitor,
                        save_best_only=True,
                        verbose=1)
    ]
    print('CREATE DATALOADER')
    data_loader = ENDataLoader(**params_dataloader)
    print('DATALOADER CREATED!')
    if cfg_params['general']['tensorboard_callback']:
        callbacks.append(TensorBoard(log_dir=tensorboard_save_path))
    if cfg_params['general']['wandb_callback']:
        # Imported lazily so wandb is only required when the callback is on.
        import wandb
        from wandb.keras import WandbCallback
        wandb.init()
        callbacks.append(WandbCallback(data_type="image", labels=data_loader.class_names))
    val_generator = None
    print('CREATE MODEL AND DATA GENETATORS')
    if params_model['mode'] == 'siamese':
        # Siamese branch: pairwise generator, contrastive loss on the
        # 'output_siamese' head.
        model = SiameseNet(cfg_params, training=True)
        train_generator = SiameseDataGenerator(class_files_paths=data_loader.train_data,
                                               class_names=data_loader.class_names,
                                               **params_generator)
        if data_loader.validate:
            val_generator = SiameseDataGenerator(class_files_paths=data_loader.val_data,
                                                 class_names=data_loader.class_names,
                                                 val_gen = True,
                                                 **params_generator)
        losses = {'output_siamese' : contrastive_loss}
        metric = {'output_siamese' : accuracy}
    else:
        # Triplet branch; optionally mirrors the base model across GPUs.
        if cfg_params['general']['gpu_ids']:
            print('Multiple gpu mode')
            gpu_ids = cfg_params['general']['gpu_ids']
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids
            print(f'Using gpu ids: {gpu_ids}')
            gpu_ids_list = gpu_ids.split(',')
            n_gpu = len(gpu_ids_list)
        else:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = '0'
            n_gpu = 1
            print('Use single gpu mode')
        model = TripletNet(cfg_params, training=True)
        if n_gpu>1:
            strategy = tf.distribute.MirroredStrategy()
            with strategy.scope():
                model.base_model = multi_gpu_model(model.base_model, gpus=n_gpu)
            # model.base_model = tf.keras.utils.multi_gpu_model(model.base_model, gpus=n_gpu)
        train_generator = TripletsDataGenerator(embedding_model=model.base_model,
                                        class_files_paths=data_loader.train_data,
                                        class_names=data_loader.class_names,
                                        **params_generator)
        if data_loader.validate:
            val_generator = SimpleTripletsDataGenerator(data_loader.val_data,
                                           data_loader.class_names,
                                           **params_generator)
        losses = triplet_loss(params_generator['margin'])
        metric = ['accuracy']
    print('DONE')
    if args.resume_from is not None:
        model.load_model(args.resume_from)
    print('COMPILE MODEL')
    model.model.compile(loss=losses,
                optimizer=params_train['optimizer'],
                metrics=metric)
    # Optional supervised pretraining of the backbone with a softmax head.
    if 'softmax' in cfg_params:
        params_softmax = cfg_params['softmax']
        params_save_paths = cfg_params['general']
        pretrain_backbone_softmax(model.backbone_model,
                                  data_loader,
                                  params_softmax,
                                  params_save_paths)
    history = model.model.fit_generator(train_generator,
                              validation_data=val_generator,
                              epochs=params_train['n_epochs'],
                              callbacks=callbacks,
                              verbose=1,
                              use_multiprocessing=False)
    if params_train['plot_history']:
        plot_grapths(history, plots_save_path)
if __name__ == '__main__':
    main()
| [
"embedding_net.models.TripletNet",
"embedding_net.backbones.pretrain_backbone_softmax",
"wandb.init",
"tensorflow.keras.callbacks.EarlyStopping",
"sys.path.append",
"argparse.ArgumentParser",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"embedding_net.losses_and_accuracies.triplet_loss",
"wandb.k... | [((87, 112), 'os.path.dirname', 'os.path.dirname', (['BASE_DIR'], {}), '(BASE_DIR)\n', (102, 112), False, 'import os\n'), ((113, 138), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (128, 138), False, 'import sys\n'), ((49, 74), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (64, 74), False, 'import os\n'), ((906, 966), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a classificator"""'}), "(description='Train a classificator')\n", (929, 966), False, 'import argparse\n'), ((1219, 1275), 'os.path.join', 'os.path.join', (["params['work_dir']", "params['project_name']"], {}), "(params['work_dir'], params['project_name'])\n", (1231, 1275), False, 'import os\n'), ((1300, 1339), 'os.path.join', 'os.path.join', (['work_dir_path', '"""weights/"""'], {}), "(work_dir_path, 'weights/')\n", (1312, 1339), False, 'import os\n'), ((1375, 1432), 'os.path.join', 'os.path.join', (['work_dir_path', '"""pretraining_model/weights/"""'], {}), "(work_dir_path, 'pretraining_model/weights/')\n", (1387, 1432), False, 'import os\n'), ((1459, 1500), 'os.path.join', 'os.path.join', (['work_dir_path', '"""encodings/"""'], {}), "(work_dir_path, 'encodings/')\n", (1471, 1500), False, 'import os\n'), ((1523, 1560), 'os.path.join', 'os.path.join', (['work_dir_path', '"""plots/"""'], {}), "(work_dir_path, 'plots/')\n", (1535, 1560), False, 'import os\n'), ((1589, 1627), 'os.path.join', 'os.path.join', (['work_dir_path', '"""tf_log/"""'], {}), "(work_dir_path, 'tf_log/')\n", (1601, 1627), False, 'import os\n'), ((1667, 1723), 'os.path.join', 'os.path.join', (['work_dir_path', '"""pretraining_model/tf_log/"""'], {}), "(work_dir_path, 'pretraining_model/tf_log/')\n", (1679, 1723), False, 'import os\n'), ((1753, 1815), 'os.path.join', 'os.path.join', (['weights_save_path', "('epoch_{epoch:03d}' + '.hdf5')"], {}), "(weights_save_path, 'epoch_{epoch:03d}' + '.hdf5')\n", (1765, 1815), False, 'import 
os\n'), ((1821, 1862), 'os.makedirs', 'os.makedirs', (['work_dir_path'], {'exist_ok': '(True)'}), '(work_dir_path, exist_ok=True)\n', (1832, 1862), False, 'import os\n'), ((1868, 1913), 'os.makedirs', 'os.makedirs', (['weights_save_path'], {'exist_ok': '(True)'}), '(weights_save_path, exist_ok=True)\n', (1879, 1913), False, 'import os\n'), ((1918, 1974), 'os.makedirs', 'os.makedirs', (['weights_pretrained_save_path'], {'exist_ok': '(True)'}), '(weights_pretrained_save_path, exist_ok=True)\n', (1929, 1974), False, 'import os\n'), ((1979, 2026), 'os.makedirs', 'os.makedirs', (['encodings_save_path'], {'exist_ok': '(True)'}), '(encodings_save_path, exist_ok=True)\n', (1990, 2026), False, 'import os\n'), ((2031, 2074), 'os.makedirs', 'os.makedirs', (['plots_save_path'], {'exist_ok': '(True)'}), '(plots_save_path, exist_ok=True)\n', (2042, 2074), False, 'import os\n'), ((2079, 2139), 'os.makedirs', 'os.makedirs', (['tensorboard_pretrained_save_path'], {'exist_ok': '(True)'}), '(tensorboard_pretrained_save_path, exist_ok=True)\n', (2090, 2139), False, 'import os\n'), ((2298, 2323), 'embedding_net.utils.parse_params', 'parse_params', (['args.config'], {}), '(args.config)\n', (2310, 2323), False, 'from embedding_net.utils import parse_params, plot_grapths\n'), ((2633, 2724), 'os.path.join', 'os.path.join', (["cfg_params['general']['work_dir']", "cfg_params['general']['project_name']"], {}), "(cfg_params['general']['work_dir'], cfg_params['general'][\n 'project_name'])\n", (2645, 2724), False, 'import os\n'), ((2777, 2816), 'os.path.join', 'os.path.join', (['work_dir_path', '"""weights/"""'], {}), "(work_dir_path, 'weights/')\n", (2789, 2816), False, 'import os\n'), ((3734, 3767), 'embedding_net.datagenerators.ENDataLoader', 'ENDataLoader', ([], {}), '(**params_dataloader)\n', (3746, 3767), False, 'from embedding_net.datagenerators import ENDataLoader, SimpleDataGenerator, TripletsDataGenerator, SimpleTripletsDataGenerator, SiameseDataGenerator\n'), ((3262, 3340), 
'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': 'callback_monitor', 'factor': '(0.1)', 'patience': '(4)', 'verbose': '(1)'}), '(monitor=callback_monitor, factor=0.1, patience=4, verbose=1)\n', (3279, 3340), False, 'from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((3376, 3439), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'callback_monitor', 'patience': '(10)', 'verbose': '(1)'}), '(monitor=callback_monitor, patience=10, verbose=1)\n', (3389, 3439), False, 'from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((3494, 3604), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'weights_save_file_path', 'monitor': 'callback_monitor', 'save_best_only': '(True)', 'verbose': '(1)'}), '(filepath=weights_save_file_path, monitor=callback_monitor,\n save_best_only=True, verbose=1)\n', (3509, 3604), False, 'from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((4049, 4061), 'wandb.init', 'wandb.init', ([], {}), '()\n', (4059, 4061), False, 'import wandb\n'), ((4284, 4321), 'embedding_net.models.SiameseNet', 'SiameseNet', (['cfg_params'], {'training': '(True)'}), '(cfg_params, training=True)\n', (4294, 4321), False, 'from embedding_net.models import EmbeddingNet, TripletNet, SiameseNet\n'), ((4348, 4472), 'embedding_net.datagenerators.SiameseDataGenerator', 'SiameseDataGenerator', ([], {'class_files_paths': 'data_loader.train_data', 'class_names': 'data_loader.class_names'}), '(class_files_paths=data_loader.train_data, class_names=\n data_loader.class_names, **params_generator)\n', (4368, 4472), False, 'from embedding_net.datagenerators import ENDataLoader, SimpleDataGenerator, TripletsDataGenerator, SimpleTripletsDataGenerator, SiameseDataGenerator\n'), ((5612, 5649), 'embedding_net.models.TripletNet', 'TripletNet', (['cfg_params'], {'training': 
'(True)'}), '(cfg_params, training=True)\n', (5622, 5649), False, 'from embedding_net.models import EmbeddingNet, TripletNet, SiameseNet\n'), ((5963, 6127), 'embedding_net.datagenerators.TripletsDataGenerator', 'TripletsDataGenerator', ([], {'embedding_model': 'model.base_model', 'class_files_paths': 'data_loader.train_data', 'class_names': 'data_loader.class_names'}), '(embedding_model=model.base_model, class_files_paths=\n data_loader.train_data, class_names=data_loader.class_names, **\n params_generator)\n', (5984, 6127), False, 'from embedding_net.datagenerators import ENDataLoader, SimpleDataGenerator, TripletsDataGenerator, SimpleTripletsDataGenerator, SiameseDataGenerator\n'), ((6528, 6568), 'embedding_net.losses_and_accuracies.triplet_loss', 'triplet_loss', (["params_generator['margin']"], {}), "(params_generator['margin'])\n", (6540, 6568), False, 'from embedding_net.losses_and_accuracies import contrastive_loss, triplet_loss, accuracy\n'), ((7009, 7108), 'embedding_net.backbones.pretrain_backbone_softmax', 'pretrain_backbone_softmax', (['model.backbone_model', 'data_loader', 'params_softmax', 'params_save_paths'], {}), '(model.backbone_model, data_loader, params_softmax,\n params_save_paths)\n', (7034, 7108), False, 'from embedding_net.backbones import pretrain_backbone_softmax\n'), ((7601, 7639), 'embedding_net.utils.plot_grapths', 'plot_grapths', (['history', 'plots_save_path'], {}), '(history, plots_save_path)\n', (7613, 7639), False, 'from embedding_net.utils import parse_params, plot_grapths\n'), ((3881, 3923), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'tensorboard_save_path'}), '(log_dir=tensorboard_save_path)\n', (3892, 3923), False, 'from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler\n'), ((4088, 4152), 'wandb.keras.WandbCallback', 'WandbCallback', ([], {'data_type': '"""image"""', 'labels': 'data_loader.class_names'}), "(data_type='image', labels=data_loader.class_names)\n", (4101, 4152), 
False, 'from wandb.keras import WandbCallback\n'), ((4623, 4759), 'embedding_net.datagenerators.SiameseDataGenerator', 'SiameseDataGenerator', ([], {'class_files_paths': 'data_loader.val_data', 'class_names': 'data_loader.class_names', 'val_gen': '(True)'}), '(class_files_paths=data_loader.val_data, class_names=\n data_loader.class_names, val_gen=True, **params_generator)\n', (4643, 4759), False, 'from embedding_net.datagenerators import ENDataLoader, SimpleDataGenerator, TripletsDataGenerator, SimpleTripletsDataGenerator, SiameseDataGenerator\n'), ((5693, 5725), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (5723, 5725), True, 'import tensorflow as tf\n'), ((6312, 6410), 'embedding_net.datagenerators.SimpleTripletsDataGenerator', 'SimpleTripletsDataGenerator', (['data_loader.val_data', 'data_loader.class_names'], {}), '(data_loader.val_data, data_loader.class_names,\n **params_generator)\n', (6339, 6410), False, 'from embedding_net.datagenerators import ENDataLoader, SimpleDataGenerator, TripletsDataGenerator, SimpleTripletsDataGenerator, SiameseDataGenerator\n'), ((5796, 5841), 'tensorflow.keras.utils.multi_gpu_model', 'multi_gpu_model', (['model.base_model'], {'gpus': 'n_gpu'}), '(model.base_model, gpus=n_gpu)\n', (5811, 5841), False, 'from tensorflow.keras.utils import multi_gpu_model\n'), ((3230, 3253), 'numpy.floor', 'np.floor', (['(x / step_size)'], {}), '(x / step_size)\n', (3238, 3253), True, 'import numpy as np\n')] |
"""
Load datasets
"""
import PIL.Image
import glob
import numpy as np
import pandas as pd
import os.path
import torch.utils.data as TD
from sklearn.model_selection import train_test_split
class GlobImageDir(TD.Dataset):
    """Dataset over image files matched by a glob expression.

    The pattern is resolved eagerly (``glob.glob`` with ``recursive=True``)
    at construction time; each item is opened with PIL and, when provided,
    run through ``transform``.

    >>> GlobDir("./data/**/*.png")  # fetch PNG images recursively under ./data
    >>> GlobDir("./data/*/*.png")  # fetch images from the grandchild dirs
    >>> GlobDir("*.png", mytranform_fn)  # fetch and transform PNG files
    """

    def __init__(self, glob_expr, transform=None):
        # Resolve the pattern once up front; item order is glob's order.
        self.fps = glob.glob(glob_expr, recursive=True)
        self.transform = transform

    def __len__(self):
        return len(self.fps)

    def __getitem__(self, index):
        path = self.fps[index]
        # Context manager ensures the file handle is released promptly.
        with PIL.Image.open(path) as image:
            loaded = self.transform(image) if self.transform else image
            return {'image': loaded, 'fp': path}
class Messidor(GlobImageDir):
    """Load the Messidor dataset, applying given transforms.

    img_transform - applied to each PIL image when it is loaded.
    getitem_transform - if None, ``__getitem__`` returns a dict with the
        image, file path and the CSV metadata row; otherwise the dict is
        passed through this callable first.

    A common usage looks like this:

        >>> messidor = Messidor(
            "./data/messidor/*.csv",
            "./data/messidor/**/*.tif",
            img_transform=tvt.Compose([
                tvt.RandomCrop((512, 512)),
                tvt.ToTensor(),
            ]),
            getitem_transform=lambda x: (
                x['image'],
                torch.tensor([int(x['Retinopathy grade'] != 0)]))
        )
    """
    def __init__(self, csv_glob_expr, img_glob_expr,
                 img_transform=None, getitem_transform=None):
        super().__init__(img_glob_expr, img_transform)
        self.getitem_transform = getitem_transform
        # Concatenate every matched CSV and key rows by image filename so
        # samples can be joined to their metadata in __getitem__.
        self.csv_data = pd.concat([
            pd.read_csv(x) for x in glob.glob(csv_glob_expr, recursive=True)])\
            .set_index('Image name')
        assert self.csv_data.shape[0] == len(self.fps)  # sanity check
        self.shape_data = None  # populating this requires a pass through all imgs

    def __getitem__(self, index, getitem_transform=True):
        """Return the sample at ``index`` joined with its CSV metadata.

        getitem_transform: when True (default) and a transform was given at
        construction, the sample dict is passed through it.
        """
        sample = super().__getitem__(index)
        fname = os.path.basename(sample['fp'])
        sample.update(dict(self.csv_data.loc[fname]))
        if getitem_transform and self.getitem_transform is not None:
            return self.getitem_transform(sample)
        else:
            return sample

    def getitem_no_transform(self, index):
        """Apply the image transform, but not the getitem_transform.

        Return a dict
        """
        return self.__getitem__(index, False)

    def train_test_split(self, train_frac, random_state=None):
        """
        Train test split and STRATIFY across the Opthalmologic departments
        that the images came from because the dimensions of images from each
        department are different.

        train_frac: a value in [0, 1]
        random_state: passed to sklearn.model_selection.train_test_split

        Returns (train_idxs, val_idxs) index arrays into this dataset.
        """
        # input num samples
        N = len(self)
        # BUGFIX: random_state was previously accepted but never forwarded,
        # so the split could not be made reproducible.
        train_idxs, val_idxs = train_test_split(
            np.arange(N), train_size=train_frac,
            random_state=random_state,
            stratify=self.csv_data['Ophthalmologic department'].values)
        return train_idxs, val_idxs

    def fetch_img_dims(self):
        """
        Iteratively load all images in dataset and store their shape
        in a dataframe. Useful for analysis. Takes a minute or so.

        # # file dimensions are not uniform.
        # # base 1 and base 2 have unique dimension.
        # # base 3 has 2 different dimensions.
        # df.groupby(['base', 'x', 'y', 'z'])['fp'].count()
        """
        # BUGFIX: one row per image with columns (fp, x, y, z). The previous
        # dict-of-columns construction made `df.columns = [...]` and
        # `df['fp']` below incorrect.
        # NOTE(review): assumes self[i] yields an object with a 3-component
        # .shape (i.e. getitem_transform returns an array) — confirm.
        df = pd.DataFrame(
            [[fp] + list(self[i].shape)
             for i, fp in zip(range(len(self.fps)), self.fps)])
        df.columns = ['fp', 'x', 'y', 'z']
        df = pd.concat([df, df['fp'].str.extract(
            r'/Base(?P<base>\d)(?P<base2>\d)/').astype('int')], axis=1)
        df['Image name'] = df['fp'].apply(os.path.basename)
        # BUGFIX: set_index returns a new frame; the result was discarded.
        df = df.set_index('Image name')
        return df
if __name__ == "__main__":
    # Smoke test: load the dataset with a minimal transform and print the
    # pixel-array shape of the first sample.
    messidor = Messidor(
        "./data/messidor/*.csv",
        "./data/messidor/**/*.tif",
        img_transform=lambda x: x.getdata()
    )
    z = messidor[0]
    print(np.array(z['image']).shape)
| [
"numpy.array",
"pandas.read_csv",
"glob.glob",
"numpy.arange"
] | [((654, 690), 'glob.glob', 'glob.glob', (['glob_expr'], {'recursive': '(True)'}), '(glob_expr, recursive=True)\n', (663, 690), False, 'import glob\n'), ((3204, 3216), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3213, 3216), True, 'import numpy as np\n'), ((4361, 4381), 'numpy.array', 'np.array', (["z['image']"], {}), "(z['image'])\n", (4369, 4381), True, 'import numpy as np\n'), ((1895, 1909), 'pandas.read_csv', 'pd.read_csv', (['x'], {}), '(x)\n', (1906, 1909), True, 'import pandas as pd\n'), ((1919, 1959), 'glob.glob', 'glob.glob', (['csv_glob_expr'], {'recursive': '(True)'}), '(csv_glob_expr, recursive=True)\n', (1928, 1959), False, 'import glob\n')] |
import time
import numpy as np
import image_data_pipeline
import ni
##import thorlabs
from pco import pco_edge_camera_child_process
import pickle
def main():
    """Run a stimulated-emission phase experiment: program the NI analog-out
    card with green/red laser pulse trains synchronized to pco.edge camera
    exposures, acquire images through the Image Data Pipeline, and save the
    played voltage arrays and acquired TIFFs to disk.
    """
    # This incantation is forced on us so the IDP won't print everything twice:
    import logging
    import multiprocessing as mp
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)
    # Set parameters for IDP (Image Data Pipeline)
    set_num_buffers = 3
    image_height_pixels = 128
    image_width_pixels = 380
    # Set parameters for DAQ (analog out card)
    num_daq_channels = 3
    daq_rate = 8e5
    ##############################################################
    # Set exposure parameters for camera and laser illumination: #
    ##############################################################
    green_AOM_mV = [
        300,
        ] #calibrated
    green_powers = [
        '1060mW',
        ]
    red_AOM_mV = [
        269,
        ] #calibrated
    red_powers = [
        '240mW',
        ]
    angle_string = '14'
    # Set laser pulse duration VERY SHORT
    green_pulse_duration_pixels = 1
    red_pulse_duration_pixels = 1
    # Set green pulse train repetition time short enough to
    # thermally stabilize the sample
    green_rep_time_us = 600
    green_rep_time_pixels = int(np.ceil(
        green_rep_time_us * 1e-6 * daq_rate))
    # how many red laser shots in an exposure?
    pulses_per_exposure = 8
    # you don't want red light leaking into next exposure so set this to
    # 1 if you're imaging 720 nm.
    # set to zero if you're looking for depletion, because you need
    # every green pulse matched with a red for that measurement
    less_red_pulses = 0
    desired_effective_exposure_time_pixels = (green_rep_time_pixels *
                                              pulses_per_exposure)
    assert desired_effective_exposure_time_pixels > 0
    #define red/green pulse delays (in DAQ "pixels", i.e. samples)
    red_start_pixel_array = np.array([-2, 0, 2])
    num_delays = red_start_pixel_array.shape[0]
    print('Red/green delay (us) =', red_start_pixel_array / daq_rate * 1e6)
    # number of exposures should be the first dimension of the idp buffer
    num_delay_scan_repetitions = 1
    num_exposures = num_delays * num_delay_scan_repetitions
    # actual roll time is 640 us, which should be a multiple of
    # green_rep_time_us, but may not always be
    # this only works for the current field of view height 128 pixels
    # 10 us per line, rolling is symmetrical around middle of chip
    rolling_time_us = 640 #experimentally determined for this field of view
    rolling_time_pixels = int(np.ceil(
        rolling_time_us * 1e-6 * daq_rate))
    # Pad so the effective exposure stays aligned to the green rep time.
    extra_time_after_roll_pixels = (green_rep_time_pixels -
                                    rolling_time_pixels %
                                    green_rep_time_pixels)
    effective_exposure_time_pixels = (extra_time_after_roll_pixels +
                                      desired_effective_exposure_time_pixels)
    # reminder: negative delay values (red before green) are only valid if the
    # camera roll finishes before the red pulse gets there
    assert extra_time_after_roll_pixels > -min(red_start_pixel_array)
    set_exposure_time_pixels = (rolling_time_pixels +
                                effective_exposure_time_pixels)
    # set exposure time must be an integer multiple of green rep time
    assert (set_exposure_time_pixels % green_rep_time_pixels) == 0
    set_exposure_time_us = int(np.ceil(
        set_exposure_time_pixels / daq_rate * 1e6))
    # Initialize the IDP:
    idp = image_data_pipeline.Image_Data_Pipeline(
        num_buffers=set_num_buffers,
        buffer_shape=(num_exposures, image_height_pixels, image_width_pixels),
        camera_child_process=pco_edge_camera_child_process)
    assert idp.buffer_shape[0] == num_exposures
    # Initialize the DAQ:
    daq = ni.PCI_6733(
        num_channels=num_daq_channels,
        rate=daq_rate,
        verbose=True)
    assert daq.rate == daq_rate
    try:
        # Apply camera settings:
        idp.display.set_intensity_scaling('median_filter_autoscale')
        idp.apply_camera_settings(
            trigger='external_trigger',
            exposure_time_microseconds = set_exposure_time_us,
            region_of_interest ={'bottom': 1088,
                                 'top': 961,
                                 'left': 841,
                                 'right': 1220},
            preframes=0)
        # UNCOMMON COMMAND: the daq voltage string can get very long, so
        # Andy wrote a new part of pco.py that adjusts the set timeout
        # for waiting for the FIRST camera trigger (Oct 4, 2016)
        idp.camera.commands.send(('set_first_trigger_timeout_seconds',
                                  {'first_trigger_timeout_seconds': 3}))
        assert idp.camera.commands.recv() == 3 # clear command queue
        # Figure out some basic timing information: This is what the
        # camera thinks it's doing. Is it what we want it to do?
        exposure_time_us = idp.camera.get_setting('exposure_time_microseconds')
        print('I want exposure time to be (us)',set_exposure_time_us)
        print('Exposure time actually is (us)',exposure_time_us)
        assert exposure_time_us == set_exposure_time_us
        rolling_time_us = idp.camera.get_setting('rolling_time_microseconds')
        rolling_time_jitter_us = 15 #experimentally measured and also in spec
        rolling_time_us += rolling_time_jitter_us
        # NOTE(review): pulse_tail_us and camera_rep_time_us below are
        # computed but never used — kept for documentation value; confirm.
        pulse_tail_us = 25 #experimentally measured response of buffer amp and AOM
        print("\nCamera exposure time:", exposure_time_us, "(us)\n")
        print("\nCamera rolling time:", rolling_time_us, "(us)\n")
        effective_exposure_us = exposure_time_us - rolling_time_us
        print("\nCamera effective exposure:", effective_exposure_us, "(us)\n")
        for [red_voltage_num, my_red_voltage_mV] in enumerate(red_AOM_mV):
            for [green_voltage_num, my_green_voltage_mV] in enumerate(green_AOM_mV):
                # Calculate DAQ voltages
                # Set voltages to play on analog out card
                green_voltage = my_green_voltage_mV/1000
                red_voltage = my_red_voltage_mV/1000
                trig_voltage = 3
                # time between exposures must be greater than camera trigger
                # jitter and a multiple of the green rep time
                # trigger jitter is about 10 us
                time_between_exposures_pixels = 2 * green_rep_time_pixels
                camera_rep_time_pixels = (set_exposure_time_pixels +
                                          time_between_exposures_pixels)
                camera_rep_time_us = camera_rep_time_pixels / daq_rate * 1e6
                # Columns: 0 = camera trigger, 1 = green AOM, 2 = red AOM.
                voltages = np.zeros((camera_rep_time_pixels * num_exposures,
                                      num_daq_channels))
                # green laser pulses on for the duration of the daq play
                green_chunk = np.zeros(green_rep_time_pixels)
                green_chunk[0:green_pulse_duration_pixels] = green_voltage
                voltages[:,1] = np.tile(
                    green_chunk, int(voltages.shape[0]/green_rep_time_pixels))
                # camera trigger duration should be 3us or greater
                trigger_duration_us = 3
                trigger_duration_pixels = int(np.ceil(
                    trigger_duration_us / 1e6 * daq_rate))
                # loop used to define camera trigger and red laser pulse
                # voltages
                for which_exposure in range(num_exposures):
                    cursor = which_exposure * camera_rep_time_pixels
                    # Camera triggers:
                    voltages[cursor:cursor + trigger_duration_pixels, 0] = (
                        trig_voltage)
                    # Red laser pulses
                    red_start_pixel = (
                        red_start_pixel_array[which_exposure % num_delays])
                    red_series_start = (cursor +
                                        rolling_time_pixels +
                                        extra_time_after_roll_pixels +
                                        red_start_pixel)
                    red_chunk = np.zeros(green_rep_time_pixels)
                    red_chunk[0:red_pulse_duration_pixels] = red_voltage
                    red_exposure_array = np.tile(red_chunk, (
                        pulses_per_exposure - less_red_pulses))
                    voltages[red_series_start:(red_series_start + red_exposure_array.shape[0]), 2] = red_exposure_array
                # save voltages that will be sent to daq
                with open('voltages_green_' + green_powers[green_voltage_num] +
                          '_red_' + red_powers[red_voltage_num] +
                          '_phase.pickle', 'wb') as f:
                    pickle.dump(voltages, f)
                # Put it all together
                idp.load_permission_slips(
                    num_slips=1,
                    file_saving_info=[
                        {'filename': (
                            'STE_phase_angle_' + angle_string +
                            '_green_' + green_powers[green_voltage_num] +
                            '_red_' + red_powers[red_voltage_num] +
                            '.tif'),
                         'channels': num_delays,
                         'slices': num_delay_scan_repetitions,
                         }])
                daq.play_voltages(voltages, block=True)
    finally:
        # Shut everything down. This can be important!
        daq.close()
        idp.close()
if __name__ == '__main__':
    main()
| [
"numpy.tile",
"multiprocessing.log_to_stderr",
"numpy.ceil",
"pickle.dump",
"numpy.array",
"numpy.zeros",
"image_data_pipeline.Image_Data_Pipeline",
"ni.PCI_6733"
] | [((320, 338), 'multiprocessing.log_to_stderr', 'mp.log_to_stderr', ([], {}), '()\n', (336, 338), True, 'import multiprocessing as mp\n'), ((2014, 2034), 'numpy.array', 'np.array', (['[-2, 0, 2]'], {}), '([-2, 0, 2])\n', (2022, 2034), True, 'import numpy as np\n'), ((3690, 3889), 'image_data_pipeline.Image_Data_Pipeline', 'image_data_pipeline.Image_Data_Pipeline', ([], {'num_buffers': 'set_num_buffers', 'buffer_shape': '(num_exposures, image_height_pixels, image_width_pixels)', 'camera_child_process': 'pco_edge_camera_child_process'}), '(num_buffers=set_num_buffers,\n buffer_shape=(num_exposures, image_height_pixels, image_width_pixels),\n camera_child_process=pco_edge_camera_child_process)\n', (3729, 3889), False, 'import image_data_pipeline\n'), ((4003, 4074), 'ni.PCI_6733', 'ni.PCI_6733', ([], {'num_channels': 'num_daq_channels', 'rate': 'daq_rate', 'verbose': '(True)'}), '(num_channels=num_daq_channels, rate=daq_rate, verbose=True)\n', (4014, 4074), False, 'import ni\n'), ((1344, 1389), 'numpy.ceil', 'np.ceil', (['(green_rep_time_us * 1e-06 * daq_rate)'], {}), '(green_rep_time_us * 1e-06 * daq_rate)\n', (1351, 1389), True, 'import numpy as np\n'), ((2695, 2738), 'numpy.ceil', 'np.ceil', (['(rolling_time_us * 1e-06 * daq_rate)'], {}), '(rolling_time_us * 1e-06 * daq_rate)\n', (2702, 2738), True, 'import numpy as np\n'), ((3580, 3636), 'numpy.ceil', 'np.ceil', (['(set_exposure_time_pixels / daq_rate * 1000000.0)'], {}), '(set_exposure_time_pixels / daq_rate * 1000000.0)\n', (3587, 3636), True, 'import numpy as np\n'), ((6970, 7038), 'numpy.zeros', 'np.zeros', (['(camera_rep_time_pixels * num_exposures, num_daq_channels)'], {}), '((camera_rep_time_pixels * num_exposures, num_daq_channels))\n', (6978, 7038), True, 'import numpy as np\n'), ((7184, 7215), 'numpy.zeros', 'np.zeros', (['green_rep_time_pixels'], {}), '(green_rep_time_pixels)\n', (7192, 7215), True, 'import numpy as np\n'), ((7572, 7623), 'numpy.ceil', 'np.ceil', (['(trigger_duration_us / 1000000.0 * 
daq_rate)'], {}), '(trigger_duration_us / 1000000.0 * daq_rate)\n', (7579, 7623), True, 'import numpy as np\n'), ((8467, 8498), 'numpy.zeros', 'np.zeros', (['green_rep_time_pixels'], {}), '(green_rep_time_pixels)\n', (8475, 8498), True, 'import numpy as np\n'), ((8617, 8674), 'numpy.tile', 'np.tile', (['red_chunk', '(pulses_per_exposure - less_red_pulses)'], {}), '(red_chunk, pulses_per_exposure - less_red_pulses)\n', (8624, 8674), True, 'import numpy as np\n'), ((9111, 9135), 'pickle.dump', 'pickle.dump', (['voltages', 'f'], {}), '(voltages, f)\n', (9122, 9135), False, 'import pickle\n')] |
import torch
from torchvision import transforms
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from utils.utils import *
import json
import cv2
import copy
import numpy as np
import os
from tool.darknet2pytorch import *
from tqdm import tqdm
from skimage import measure
from argparse import ArgumentParser
def adjust_bbox_size(bbox, rate, ori_rate):
    """Shrink a bounding box about its center by the linear factor ``rate``.

    bbox: [[left, top], [right, bottom]] — NOTE: its coordinates are first
        scaled IN PLACE by ``ori_rate`` to map them back to the original
        image's coordinate system.
    rate: linear shrink factor (2 means each side is roughly halved); 0.5 is
        added as a margin so the resulting area ratio stays within bounds
        (~0.02 of the image in the caller's usage).
    ori_rate: coordinate rescaling factor applied to ``bbox`` first.

    Returns [(left, top), (right, bottom)] with int coordinates, or -1 when
    the box is too small to be worth shrinking.
    """
    shrink = rate + 0.5  # margin so the area ratio stays within ~0.02
    # Map the box back to original-image coordinates (mutates the caller's list).
    for corner in bbox:
        corner[0] *= ori_rate
        corner[1] *= ori_rate
    # Box center and the slope of its diagonal; shrinking along the diagonal
    # preserves the aspect ratio.
    cx = (bbox[1][0] - bbox[0][0]) / 2.0 + bbox[0][0]
    cy = (bbox[1][1] - bbox[0][1]) / 2.0 + bbox[0][1]
    slope = (bbox[1][1] - bbox[0][1]) / (bbox[1][0] - bbox[0][0])
    half_width = cx - bbox[0][0]
    if half_width <= shrink:
        return -1  # box too small — caller skips optimizing this bbox
    half_width /= shrink
    x_left = cx - half_width
    x_right = cx + half_width
    y_left = slope * (x_left - cx) + cy
    y_right = slope * (x_right - cx) + cy
    return [(int(x_left), int(y_left)), (int(x_right), int(y_right))]
def attack_imgs_yolov4(root_path, imgs):
    """Run an iterative momentum-based adversarial attack against YOLOv4.

    For each image, repeatedly compute the attack gradient via ``do_attack``,
    accumulate a sign-gradient noise restricted to regions derived from the
    first iteration's detections, and finally overwrite the image on disk
    with the iterate that produced the fewest detection boxes.
    """
    cfgfile = "models/yolov4.cfg"
    weightfile = "models/yolov4.weights"
    darknet_model = Darknet(cfgfile)
    darknet_model.load_weights(weightfile)
    darknet_model = darknet_model.eval().cuda()
    for img in imgs:
        img_path = os.path.join(root_path, img)
        original_img = None
        adversarial_degree = 255.
        noise = None
        momentum = 1.0
        min_bbox_num = 999  # fewest detection boxes seen so far
        ori_bbox_num = None
        attack_map = None  # mask of regions where noise is applied
        for attack_iter in range(500):
            if attack_iter != 0:
                img = im
            else:
                img = Image.open(img_path).convert('RGB')
            img_copy = copy.deepcopy(img)  # saved if this iterate turns out best
            resize_small = transforms.Compose([
                transforms.Resize((608, 608)),
            ])
            img = resize_small(img)
            if original_img is None:
                original_img = cv2.imread(img_path)
                original_img = np.array(original_img, dtype = np.int16)
                # Clamp bounds keep perturbed pixels within +/- adversarial_degree.
                clip_min = np.clip(original_img - adversarial_degree, 0, 255)
                clip_max = np.clip(original_img + adversarial_degree, 0, 255)
            boxes, grad = do_attack(darknet_model, img_path, img, original_img, 0.5, 0.4, True)
            # First iteration only: build the attack mask from the detections.
            if attack_map is None:
                width = original_img.shape[0]  # must change for a different image size
                height = original_img.shape[1]  # must change for a different image size
                detection_map = np.zeros(original_img.shape[:2])
                for box in boxes:
                    x1 = min(max(int((box[0] - box[2] / 2.0) * width), 0), 500)  # must change for a different image size
                    y1 = min(max(int((box[1] - box[3] / 2.0) * height), 0), 500)  # must change for a different image size
                    x2 = min(max(int((box[0] + box[2] / 2.0) * width), 0), 500)  # must change for a different image size
                    y2 = min(max(int((box[1] + box[3] / 2.0) * height), 0), 500)  # must change for a different image size
                    detection_map[x1:x2, y1:y2] += 1
                # Fraction of the image covered by (possibly overlapping) boxes;
                # used below to shrink each box so the total attack area stays small.
                rate = detection_map[detection_map!=0].sum() / detection_map.size
                print("检测框面积与原图面积之比:{},需要缩小{}倍。".format(rate, math.sqrt(rate/0.02)))
                attack_map = np.zeros(original_img.shape[:2])
                attack_area_num = 0
                for box in boxes:
                    x1 = min(max(int((box[0] - box[2] / 2.0) * width), 0), 500)  # must change for a different image size
                    y1 = min(max(int((box[1] - box[3] / 2.0) * height), 0), 500)  # must change for a different image size
                    x2 = min(max(int((box[0] + box[2] / 2.0) * width), 0), 500)  # must change for a different image size
                    y2 = min(max(int((box[1] + box[3] / 2.0) * height), 0), 500)  # must change for a different image size
                    if attack_area_num >= 10:
                        break
                    adjust_bbox = adjust_bbox_size([[y1, x1], [y2, x2]], math.sqrt(rate/0.02), ori_rate=1)
                    if adjust_bbox != -1:
                        attack_area_num += 1
                        attack_map[adjust_bbox[0][0]:adjust_bbox[1][0], adjust_bbox[0][1]:adjust_bbox[1][1]] =1
                # attack_map[y1:y2, x1:x2] =1
                attack_rate = attack_map[attack_map==1].size / attack_map.size
                attack_map = np.stack((attack_map, attack_map, attack_map),axis=-1)
                print("攻击区域面积与原图面积之比:{}".format(attack_rate))
            if ori_bbox_num is None:
                ori_bbox_num = len(boxes)
            if len(boxes) <= min_bbox_num:
                min_bbox_num = len(boxes)  # track the iterate with the fewest boxes
                attack_image = img_copy
            print('攻击次数', attack_iter, '最初检测框的数量:', ori_bbox_num, '当前最少的检测框数量:', min_bbox_num, '当前的检测框数量:', len(boxes))
            # Momentum accumulation of the sign gradient, resized to image size.
            if noise is None:
                noise = torch.sign(grad).squeeze(0).numpy().transpose(1, 2, 0)
                noise = cv2.resize(noise, original_img.shape[:2],interpolation=cv2.INTER_CUBIC)
            else:
                temp_noise = torch.sign(grad).squeeze(0).numpy().transpose(1, 2, 0)
                temp_noise = cv2.resize(temp_noise, original_img.shape[:2],interpolation=cv2.INTER_CUBIC)
                noise = momentum * noise + temp_noise
            # Apply masked noise to the clean copy, clamp, and convert back to PIL.
            img = cv2.cvtColor(np.asarray(img_copy),cv2.COLOR_RGB2BGR)
            img = np.clip(img + noise * attack_map, clip_min, clip_max).astype(np.uint8)
            im = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            # im.save(img_path)
        attack_image.save(img_path)
def inference_single_attack(img_path, darknet_model, img, img_cv2):
    """Run one attack step and return the L1-normalized gradient as noise.

    Returns (noise, boxes) where noise is an HxWxC numpy array derived from
    the gradient of ``do_attack`` and boxes are the current detections.
    """
    img_PIL = Image.open(img_path).convert('RGB')
    original_img = copy.deepcopy(img_PIL)
    # resize_small = transforms.Compose([
    #     transforms.Resize((608, 608)),
    # ])
    # img = resize_small(img)
    # img = cv2.resize(img, (608, 608))
    # img = cv2.resize(img, (608, 608),interpolation=cv2.INTER_CUBIC)  # adjust size for a different dataset
    boxes, grad = do_attack(darknet_model, img_path, img, original_img, 0.5, 0.4, img_cv2, True)
    # print(grad)
    # Normalize so the noise magnitude is independent of gradient scale.
    grad = grad / torch.norm(grad,p=1)
    noise = grad.squeeze(0).numpy().transpose(1, 2, 0)
    # noise = cv2.resize(noise, (500, 500),interpolation=cv2.INTER_CUBIC)  # adjust size for a different dataset
    return noise, boxes
def inference_detector_yolov4(darknet_model, img):
# img = Image.open(img_path).convert('RGB')
resize_small = transforms.Compose([
transforms.Resize((608, 608)),
])
img = resize_small(img)
# print(np.array(img))
boxes = do_detect(darknet_model, img, 0.5, 0.4, True)
return boxes
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('gpu', help='different task for different gpu')
args = parser.parse_args()
gpu = int(args.gpu)
root_path = './images/'
imgs = os.listdir(root_path)[125*gpu:125*(gpu+1)]
print(len(imgs))
attack_imgs_yolov4(root_path, imgs)
| [
"numpy.clip",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"numpy.asarray",
"torch.sign",
"numpy.array",
"torch.norm",
"numpy.zeros",
"numpy.stack",
"cv2.cvtColor",
"copy.deepcopy",
"torchvision.transforms.Resize",
"cv2.resize",
"cv2.imread"
] | [((6118, 6140), 'copy.deepcopy', 'copy.deepcopy', (['img_PIL'], {}), '(img_PIL)\n', (6131, 6140), False, 'import copy\n'), ((7085, 7101), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (7099, 7101), False, 'from argparse import ArgumentParser\n'), ((1731, 1759), 'os.path.join', 'os.path.join', (['root_path', 'img'], {}), '(root_path, img)\n', (1743, 1759), False, 'import os\n'), ((6536, 6557), 'torch.norm', 'torch.norm', (['grad'], {'p': '(1)'}), '(grad, p=1)\n', (6546, 6557), False, 'import torch\n'), ((7268, 7289), 'os.listdir', 'os.listdir', (['root_path'], {}), '(root_path)\n', (7278, 7289), False, 'import os\n'), ((2163, 2181), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (2176, 2181), False, 'import copy\n'), ((6871, 6900), 'torchvision.transforms.Resize', 'transforms.Resize', (['(608, 608)'], {}), '((608, 608))\n', (6888, 6900), False, 'from torchvision import transforms\n'), ((2415, 2435), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (2425, 2435), False, 'import cv2\n'), ((2467, 2505), 'numpy.array', 'np.array', (['original_img'], {'dtype': 'np.int16'}), '(original_img, dtype=np.int16)\n', (2475, 2505), True, 'import numpy as np\n'), ((2535, 2585), 'numpy.clip', 'np.clip', (['(original_img - adversarial_degree)', '(0)', '(255)'], {}), '(original_img - adversarial_degree, 0, 255)\n', (2542, 2585), True, 'import numpy as np\n'), ((2613, 2663), 'numpy.clip', 'np.clip', (['(original_img + adversarial_degree)', '(0)', '(255)'], {}), '(original_img + adversarial_degree, 0, 255)\n', (2620, 2663), True, 'import numpy as np\n'), ((2947, 2979), 'numpy.zeros', 'np.zeros', (['original_img.shape[:2]'], {}), '(original_img.shape[:2])\n', (2955, 2979), True, 'import numpy as np\n'), ((3701, 3733), 'numpy.zeros', 'np.zeros', (['original_img.shape[:2]'], {}), '(original_img.shape[:2])\n', (3709, 3733), True, 'import numpy as np\n'), ((4740, 4795), 'numpy.stack', 'np.stack', (['(attack_map, attack_map, attack_map)'], 
{'axis': '(-1)'}), '((attack_map, attack_map, attack_map), axis=-1)\n', (4748, 4795), True, 'import numpy as np\n'), ((5327, 5399), 'cv2.resize', 'cv2.resize', (['noise', 'original_img.shape[:2]'], {'interpolation': 'cv2.INTER_CUBIC'}), '(noise, original_img.shape[:2], interpolation=cv2.INTER_CUBIC)\n', (5337, 5399), False, 'import cv2\n'), ((5533, 5610), 'cv2.resize', 'cv2.resize', (['temp_noise', 'original_img.shape[:2]'], {'interpolation': 'cv2.INTER_CUBIC'}), '(temp_noise, original_img.shape[:2], interpolation=cv2.INTER_CUBIC)\n', (5543, 5610), False, 'import cv2\n'), ((5699, 5719), 'numpy.asarray', 'np.asarray', (['img_copy'], {}), '(img_copy)\n', (5709, 5719), True, 'import numpy as np\n'), ((5863, 5899), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (5875, 5899), False, 'import cv2\n'), ((2264, 2293), 'torchvision.transforms.Resize', 'transforms.Resize', (['(608, 608)'], {}), '((608, 608))\n', (2281, 2293), False, 'from torchvision import transforms\n'), ((5759, 5812), 'numpy.clip', 'np.clip', (['(img + noise * attack_map)', 'clip_min', 'clip_max'], {}), '(img + noise * attack_map, clip_min, clip_max)\n', (5766, 5812), True, 'import numpy as np\n'), ((5248, 5264), 'torch.sign', 'torch.sign', (['grad'], {}), '(grad)\n', (5258, 5264), False, 'import torch\n'), ((5449, 5465), 'torch.sign', 'torch.sign', (['grad'], {}), '(grad)\n', (5459, 5465), False, 'import torch\n')] |
# *******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# *******************************************************************************
# daal4py DecisionTree scikit-learn-compatible estimator classes
import numpy as np
import numbers
import warnings
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.utils.validation import (
check_X_y, check_array, check_is_fitted, check_consistent_length)
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils import check_random_state
import daal4py as d4p
from .._utils import (make2d, getFPType)
from scipy.sparse import issparse
from sklearn import __version__ as sklearn_version
from distutils.version import LooseVersion
_supported_dtypes_ = [np.single, np.double]
class DecisionTreeClassifier(BaseEstimator, ClassifierMixin):
"""
Decision tree classifier powered by Intel(R) DAAL.
https://software.intel.com/en-us/daal-programming-guide-decision-tree-2
https://software.intel.com/en-us/daal-programming-guide-batch-processing-50
Parameters
----------
max_depth : int or None, default=None
Depth of the tree fitten to data. None corresponds to unlimited depth.
min_observations_in_leaf_node : int, default=10
The number of estimators in the ensemble.
estimator_params : list of str, default=tuple()
The list of attributes to use as parameters when instantiating a
new base estimator. If none are given, default parameters are used.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
Training:
inputs: dataForPruning, labelsForPruning
parameters: fptype, method, nClasses, splitCriterion, pruning, maxTreeDepth, minObservationsInLeafNodes
Prediction:
parameters: fptype, method, nBins, nClasses, resultsToEvaluate (computeClassesLabels|computeClassProbabilities)
N.B.: The only supported value for current version of the library is nBins=1.
nBins is the number of bins used to compute probabilities of the observations belonging to the class.
"""
def __init__(self, max_depth=None, min_observations_in_leaf_node=1, split_criterion='gini'):
self.max_depth = max_depth
self.min_observations_in_leaf_node = min_observations_in_leaf_node
self.split_criterion = split_criterion
def _daal4py_fit(self, X, y, w, pruning_set=None):
X_fptype = getFPType(X)
X = make2d(X)
y = make2d(y)
if pruning_set is None:
_pruning="none"
_pruning_X = None
_pruning_y = None
else:
_pruning="reducedErrorPruning"
if isinstance(pruning_set, (tuple, list)) and len(pruning_set)==2:
_pruning_X, _pruning_y = pruning_set
check_consistent_length(_pruning_X, _pruning_y)
_pruning_X = make2d(_pruning_X)
_pruning_y = make2d(_pruning_y)
else:
raise ValueError("pruning_set parameter is expected to be a tuple of pruning features and pruning dependent variables")
if w is not None:
w = make2d(np.asarray(w))
daal_max_tree_depth = 0 if (self.max_depth is None) else int(self.max_depth) + 1
alg = d4p.decision_tree_classification_training(
fptype=X_fptype,
method="defaultDense",
nClasses=int(self.n_classes_),
splitCriterion=self.split_criterion,
maxTreeDepth=daal_max_tree_depth,
minObservationsInLeafNodes=int(self.min_observations_in_leaf_node),
pruning=_pruning)
res = alg.compute(X, y,
dataForPruning=_pruning_X,
labelsForPruning=_pruning_y,
weights=w)
self.daal_model_ = res.model
self._cached_tree_state_ = None
def _get_tree_state(self):
"""
Internal utility that returns an array behind scikit-learn's tree object
from daal_model_ produced by call to fit
"""
check_is_fitted(self, ['daal_model_', '_cached_tree_state_'])
if self._cached_tree_state_ is None:
tree_state_class = d4p.getTreeState(self.daal_model_, int(self.n_classes_))
self._cached_tree_state_ = tree_state_class
return self._cached_tree_state_
def get_n_leaves(self):
ts = self._get_tree_state()
return ts.leaf_count
def get_depth(self):
ts = self._get_tree_state()
return ts.max_depth
def fit(self, X, y, sample_weight=None, pruning_set=None):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float64`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
pruning_set: None or a tuple of (X, y) corrsponding to features and
associated labels used for tree pruning. See [1] for more details.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
[1] https://software.intel.com/en-us/daal-programming-guide-decision-tree-2
"""
if not self.split_criterion in ('gini', 'infoGain'):
raise ValueError('Parameter "split_criterion" must be '
'"gini" or "infoGain".')
if not (isinstance(self.max_depth, numbers.Integral)
and (self.max_depth >= 0)):
if self.max_depth is not None:
raise ValueError('Parameter "max_depth" must be '
'a non-negative integer value or None.')
if not (isinstance(self.min_observations_in_leaf_node, numbers.Integral)
and (self.min_observations_in_leaf_node > 0)):
raise ValueError('Parameter "min_observations_in_leaf_node" must be '
'non-zero positive integer value.')
X = check_array(X, dtype=_supported_dtypes_)
y = np.asarray(y)
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
check_consistent_length(X, y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if self.n_outputs_ != 1:
_class_name = self.__class__.__name__
raise ValueError(_class_name +
" does not currently support multi-output data. " +
"Consider using OneHotEncoder")
y = check_array(y, ensure_2d=False, dtype=None)
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = \
np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.n_features_ = X.shape[1]
if self.n_classes_ < 2:
raise ValueError("Training data only contain information about one class.")
self._daal4py_fit(X, y, sample_weight, pruning_set=pruning_set)
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if check_input:
X = check_array(X, dtype=_supported_dtypes_, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def _daal4py_predict(self, X):
fptype = getFPType(X)
alg = d4p.decision_tree_classification_prediction(
fptype=fptype,
method="defaultDense",
nBins=1,
nClasses=self.n_classes_,
resultsToEvaluate="computeClassLabels")
res = alg.compute(X, self.daal_model_)
return res.prediction.ravel()
def predict(self, X, check_input=True):
check_is_fitted(self, 'daal_model_')
X = self._validate_X_predict(X, check_input)
y = self._daal4py_predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp), axis=0)
def predict_proba(self, X, check_input=True):
check_is_fitted(self, 'daal_model_')
X = self._validate_X_predict(X, check_input)
y = self._daal4py_predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp), axis=0)
| [
"sklearn.utils.validation.check_is_fitted",
"numpy.copy",
"sklearn.utils.validation.check_array",
"sklearn.utils.multiclass.check_classification_targets",
"numpy.reshape",
"numpy.unique",
"numpy.asarray",
"scipy.sparse.issparse",
"sklearn.utils.validation.check_consistent_length",
"numpy.zeros",
... | [((4984, 5045), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', "['daal_model_', '_cached_tree_state_']"], {}), "(self, ['daal_model_', '_cached_tree_state_'])\n", (4999, 5045), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((7620, 7660), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {'dtype': '_supported_dtypes_'}), '(X, dtype=_supported_dtypes_)\n', (7631, 7660), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((7673, 7686), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7683, 7686), True, 'import numpy as np\n'), ((7699, 7715), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (7712, 7715), True, 'import numpy as np\n'), ((8025, 8054), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X', 'y'], {}), '(X, y)\n', (8048, 8054), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((8571, 8614), 'sklearn.utils.validation.check_array', 'check_array', (['y'], {'ensure_2d': '(False)', 'dtype': 'None'}), '(y, ensure_2d=False, dtype=None)\n', (8582, 8614), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((8623, 8654), 'sklearn.utils.multiclass.check_classification_targets', 'check_classification_targets', (['y'], {}), '(y)\n', (8651, 8654), False, 'from sklearn.utils.multiclass import check_classification_targets\n'), ((8668, 8678), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (8675, 8678), True, 'import numpy as np\n'), ((8770, 8801), 'numpy.zeros', 'np.zeros', (['y.shape'], {'dtype': 'np.int'}), '(y.shape, dtype=np.int)\n', (8778, 8801), True, 'import numpy as np\n'), ((10345, 10511), 'daal4py.decision_tree_classification_prediction', 'd4p.decision_tree_classification_prediction', ([], {'fptype': 'fptype', 
'method': '"""defaultDense"""', 'nBins': '(1)', 'nClasses': 'self.n_classes_', 'resultsToEvaluate': '"""computeClassLabels"""'}), "(fptype=fptype, method=\n 'defaultDense', nBins=1, nClasses=self.n_classes_, resultsToEvaluate=\n 'computeClassLabels')\n", (10388, 10511), True, 'import daal4py as d4p\n'), ((10702, 10738), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""daal_model_"""'], {}), "(self, 'daal_model_')\n", (10717, 10738), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((10960, 10996), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""daal_model_"""'], {}), "(self, 'daal_model_')\n", (10975, 10996), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((7773, 7968), 'warnings.warn', 'warnings.warn', (['"""A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples,), for example using ravel()."""', 'DataConversionWarning'], {'stacklevel': '(2)'}), "(\n 'A column-vector y was passed when a 1d array was expected. 
Please change the shape of y to (n_samples,), for example using ravel().'\n , DataConversionWarning, stacklevel=2)\n", (7786, 7968), False, 'import warnings\n'), ((8219, 8241), 'numpy.reshape', 'np.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (8229, 8241), True, 'import numpy as np\n'), ((8915, 8954), 'numpy.unique', 'np.unique', (['y[:, k]'], {'return_inverse': '(True)'}), '(y[:, k], return_inverse=True)\n', (8924, 8954), True, 'import numpy as np\n'), ((9600, 9661), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {'dtype': '_supported_dtypes_', 'accept_sparse': '"""csr"""'}), "(X, dtype=_supported_dtypes_, accept_sparse='csr')\n", (9611, 9661), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((10863, 10891), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': 'np.intp'}), '(y, dtype=np.intp)\n', (10873, 10891), True, 'import numpy as np\n'), ((11121, 11149), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': 'np.intp'}), '(y, dtype=np.intp)\n', (11131, 11149), True, 'import numpy as np\n'), ((3713, 3760), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['_pruning_X', '_pruning_y'], {}), '(_pruning_X, _pruning_y)\n', (3736, 3760), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted, check_consistent_length\n'), ((4061, 4074), 'numpy.asarray', 'np.asarray', (['w'], {}), '(w)\n', (4071, 4074), True, 'import numpy as np\n'), ((9677, 9688), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (9685, 9688), False, 'from scipy.sparse import issparse\n')] |
# Generate static graphs
import os
import sys
import json
import csv
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
from plotly.subplots import make_subplots
from datetime import *
from utils import *
# Generate a graph (cases) for Maryland Zip Code
# Here we pass all the data
def generate_MD_zip_graph_with_avg(data,name,folder,_color):
if(len(data['stats'])>0):
all_x = []
all_y = []
all_x_avg = []
all_y_avg = []
# 7 days average
first_val = -1
total_day = 0
max_day = 7 # Average based on max_day days
_type = "cases"
tempValForAvg = []
tempValFormax_day = []
for d in data['stats']:
for day in d:
# Warning - it looks like in Maryland, they put a lot of cases on the first day in the stats
# so we ignore the data in the graphs to have something legible
d_day = day.split('-')
b1 = date(int(d_day[0]), int(d_day[1]), int(d_day[2]))
if(b1 > date(2020, 4, 12)):
# Org Data
all_x.append(day)
all_y.append(d[day][_type])
# For average of _type
tempValForAvg.append(float(d[day][_type]))
if(len(tempValForAvg) < max_day):
tempValFormax_day = tempValForAvg
else:
tempValFormax_day = tempValForAvg[len(tempValForAvg)-max_day:len(tempValForAvg)]
# We have strings...
tempValFormax_day = [float(i) for i in tempValFormax_day]
all_x_avg.append(day)
all_y_avg.append(np.mean(tempValFormax_day))
if(_color=="r"):
_color = "red"
elif(_color=="g"):
_color = "green"
elif(_color=="o"):
_color = "orange"
else:
_color = "black"
print("Generating graph for zip: " + name + " color: " + _color)
fig = go.Figure()
fig.add_trace(go.Bar(x=all_x, y=all_y, marker_color='rgba(158,158,158,.4)' ))
fig.add_trace(go.Scatter(x=all_x_avg, y=all_y_avg, marker_color=_color))
# Add line to every 1s & 15th of all months
for d in all_x:
if(d.endswith('15') or d.endswith('01')):
fig.add_shape(
type="line",
x0=d,
y0=0,
x1=d,
y1=np.max(all_y),
opacity=0.4,
line=dict(
color="rgba(0,0,0,.7)",
width=1,
dash="dot",
)
)
fig.update_xaxes(rangemode="nonnegative")
fig.update_yaxes(rangemode="nonnegative")
fig.update_layout(
width=350,
height=350,
margin=dict(l=30, r=20, t=0, b=20), # Top 0 with no title
paper_bgcolor='rgba(255,255,255,1)',
plot_bgcolor='rgba(255,255,255,1)',
showlegend= False
)
#print(folder + name + " > created")
fig.write_image(folder + name + ".png")
# Generate Large Graph for the State Detail page
# with 3day average line value, new cases & tests
# WARNING THIS IS THE DUAL AXIS VERSION WITH CASES & TESTS
def generate_dual_graph_X_and_cases(state, _color, folder, dataset1, dataset2, output_ext, large = False):
# Daily Data Source File
cur_json_file = open(PATH_TO_STATES_FOLDER + os.sep + state + os.sep + state + ".json", 'r')
data = json.load(cur_json_file)
# Structure for datasets1
all_x1 = []
all_y1 = []
all_x2=[]
all_y2=[]
# Get raw data
for d in data['stats']:
for day in d:
# Dataset1
all_x1.append(day)
all_y1.append(d[day][dataset1['_type']])
# Dataset2
all_x2.append(day)
all_y2.append(d[day][dataset2['_type']])
if(_color=="r"):
_color = "red"
_3dcolor = "rgba(255,0,0,0.5)"
elif(_color=="g"):
_color = "green"
_3dcolor = "rgba(34,139,34,0.5)"
elif(_color=="o"):
_color = "orange"
_3dcolor = "rgba(255,165,0,0.5)"
else:
_color = "black"
_3dcolor = "rgba(0,0,0,0.5)"
# Create Fig with secondary ax
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.update_xaxes(rangemode="nonnegative")
fig.update_yaxes(rangemode="nonnegative")
# Add the bars (always dataset2)
fig.add_trace(go.Bar(x=all_x2, y=all_y2, marker_color='rgba(158,158,158,.4)', name= dataset2['name'] ), secondary_y=True )
# AVGs in dataset1
maxY_avg_dataset1 = [0]
if('avg' in dataset1):
line_width = len(dataset1['avg'])
if(line_width==1):
line_width = 2
for avg in dataset1['avg']:
avgx, avgy, delta = get_avg_data(avg,state,dataset1['_type'])
fig.add_trace(go.Scatter(x=avgx, y=avgy, name= str(avg) +"-Day Avg " + dataset1['name'], line=dict(color="purple",width=line_width)))
maxY_avg_dataset1.append(np.max(avgy))
if('raw_line' in dataset1):
if(dataset1['raw_line'] is False):
line_width -= 1
# AVGs in dataset2
maxY_avg_dataset2 = [0]
if('avg' in dataset2):
line_width = len(dataset2['avg'])
if(line_width==1):
line_width = 2
for avg in dataset2['avg']:
avgx, avgy, delta = get_avg_data(avg,state,dataset2['_type'])
fig.add_trace(go.Scatter(x=avgx, y=avgy, name= str(avg) +"-Day Avg " + dataset2['name'], line=dict(color= _color,width=line_width)),secondary_y=True)
maxY_avg_dataset2.append(np.max(avgy))
if('raw_line' in dataset2):
if(dataset2['raw_line'] is False):
line_width -= 1
# Dataset1
if('raw_line' in dataset1):
if(dataset1['raw_line'] is True):
fig.add_trace(go.Scatter(x=all_x1, y=all_y1, name= dataset1['name'], line=dict(color="purple",width=2)))
maxY_avg_dataset1 = [np.max(all_y1)]
else:
maxY_avg_dataset1 = [0]
# Dataset2
if('raw_line' in dataset2):
if(dataset2['raw_line'] is True):
fig.add_trace(go.Scatter(x=all_x2, y=all_y2, name= dataset2['name'], line=dict(color=_color,width=2)),secondary_y=True)
maxY_avg_dataset2 = [np.max(all_y2)]
else:
maxY_avg_dataset2 = [0]
# Get MAX Y for drawing the 1st & 15th lines
# + the lockdown period
if(len(maxY_avg_dataset2)==1 and len(maxY_avg_dataset1)==1):
max_y = np.max(all_y1)
elif(len(maxY_avg_dataset2)==1 and len(maxY_avg_dataset1)!=1):
max_y = np.max(maxY_avg_dataset1)
elif(len(maxY_avg_dataset1)==1 and len(maxY_avg_dataset2)!=1):
max_y = np.max(all_y1)
else:
max_y = np.max(maxY_avg_dataset1)
# Add line to every 1s & 15th of all months
for date in all_x1:
if(date.endswith('15') or date.endswith('01')):
fig.add_shape(
type="line",
x0=date,
y0=0,
x1=date,
y1=max_y,
opacity=0.4,
line=dict(
color="rgba(0,0,0,.5)",
width=1,
),
layer="below"
)
# Do we have a period in key-dates.txt
# for the current state?
key_dates = open(KEY_DATES,'r')
csv_reader = csv.DictReader(key_dates)
rows = list(csv_reader)
start_lockdown_date = -1
end_lockdown_date = -1
for key_date_row in rows:
if(key_date_row['state']==state):
start_lockdown_date = key_date_row['start']
if(key_date_row['end'] is not None):
end_lockdown_date = key_date_row['end']
# Add lockdown period
if(start_lockdown_date!=-1 and start_lockdown_date is not None ):
if(end_lockdown_date!=-1 and end_lockdown_date is not None):
fig.add_shape(
type="rect",
x0=start_lockdown_date,
y0=0,
x1=end_lockdown_date,
y1=max_y,
fillcolor="LightSalmon",
opacity=0.1,
line_width=0,
layer="below"
)
elif(start_lockdown_date is not None):
fig.add_shape(
type="rect",
x0=start_lockdown_date,
y0=0,
x1=all_x1[len(all_x1)-1],
y1=max_y,
fillcolor="LightSalmon",
opacity=0.1,
line_width=0,
layer="below"
)
if(large is True):
fig.update_layout(
width=1000,
height=450,
title = US_STATES[state] + " Tests and New Cases",
margin=dict(l=30, r=20, t=45, b=30), # Top Title
paper_bgcolor='rgba(255,255,255,1)',
plot_bgcolor='rgba(255,255,255,1)',
showlegend= True,
yaxis2=dict( showgrid=False,
titlefont=dict(
color=_color
)
),
yaxis1=dict(
showgrid=False,
titlefont=dict(
color="purple"
)
),
legend_orientation="h"
)
else:
fig.update_layout(
width=455,
height=290,
margin=dict(l=30, r=20, t=0, b=20), # Top 0 with no title
paper_bgcolor='rgba(255,255,255,1)',
plot_bgcolor='rgba(255,255,255,1)',
showlegend= False,
yaxis2=dict(
showgrid=False,
titlefont=dict( color=_color )
),
yaxis1=dict(
showgrid=False,
titlefont=dict(
color="purple"
)
),
legend_orientation="h"
)
fig.update_yaxes(title_text="<b>"+dataset1['name']+"</b>", secondary_y=False)
fig.update_yaxes(title_text="<b>"+dataset2['name']+"</b>", secondary_y=True)
# Create file
fig.write_image(folder + os.sep + state + output_ext)
print(folder + os.sep + state + output_ext + " Created")
# Generate a graph based on state, type (like deaths, cases, mortality etc.) & color
# For states & county
def generate_graph_with_avg(state, _type, _color, folder, county, large=False):
# Get JSON Data for current state or county
if(county != '' and 'for_a_state' not in county):
cur_json_file = open(PATH_TO_STATES_FOLDER + os.sep + state + os.sep + "counties" + os.sep + county + ".json", 'r')
else:
cur_json_file = open(PATH_TO_STATES_FOLDER + os.sep + state + os.sep + state + ".json", 'r')
data = json.load(cur_json_file)
# Do we have a period in key-dates.txt
# for the current state?
key_dates = open(KEY_DATES,'r')
csv_reader = csv.DictReader(key_dates)
rows = list(csv_reader)
start_lockdown_date = -1
end_lockdown_date = -1
for key_date_row in rows:
if(key_date_row['state']==state):
start_lockdown_date = key_date_row['start']
if(key_date_row['end'] is not None):
end_lockdown_date = key_date_row['end']
if(county=="" or 'for_a_state' in county):
all_data = data['stats']
else:
all_data = data['stats']
# We sort the data by inverse date for counties
all_data = list(reversed(all_data))
# All Data
all_x = []
all_y = []
# Special to get % on the graphs
if(_type == 'test_pos_p'):
for d in all_data:
for day in d:
# Org Data
all_x.append(day)
all_y.append(d[day][_type]/100) # To get the % in the graphs
elif(_type != 'mortality'):
# 7day Average Data
all_x_avg, all_y_avg, delta = get_X_day_avg(7,all_data,_type)
for d in all_data:
for day in d:
# Org Data
all_x.append(day)
all_y.append(d[day][_type])
else:
# We compute the Mortality rate
# (total_dead/total_cases) *100
for d in all_data:
for day in d:
all_x.append(day)
if(d[day]['total_c']>0):
all_y.append(d[day]['total_d']/d[day]['total_c'])
else:
all_y.append(0)
# Compute the 7d avg for mortality
tempValForAvg = []
tempValFormax_day = []
all_x_avg = []
all_y_avg = []
max_day = 7
for c,y in enumerate(all_y):
# For average of _type
tempValForAvg.append(y)
if(len(tempValForAvg) < max_day):
tempValFormax_day = tempValForAvg
else:
tempValFormax_day = tempValForAvg[len(tempValForAvg)-max_day:len(tempValForAvg)]
all_x_avg.append(all_x[c])
all_y_avg.append(np.mean(tempValFormax_day))
if(_color=="r"):
_color = "red"
elif(_color=="g"):
_color = "green"
elif(_color=="o"):
_color = "orange"
else:
_color = "black"
fig = go.Figure()
fig.add_trace(go.Bar(x=all_x, y=all_y, marker_color='rgba(158,158,158,.4)'))
fig.add_trace(go.Scatter(x=all_x_avg, y=all_y_avg, marker_color=_color))
# Add line to every 1s & 15th of all months
for date in all_x:
if(date.endswith('15') or date.endswith('01')):
fig.add_shape(
type="line",
x0=date,
y0=0,
x1=date,
y1=np.max(all_y),
opacity=0.4,
line=dict(
color="rgba(0,0,0,.5)",
width=1,
)
)
# Add lockdown period
if(start_lockdown_date!=-1 and start_lockdown_date is not None ):
if(end_lockdown_date!=-1 and end_lockdown_date is not None):
fig.add_shape(
type="rect",
x0=start_lockdown_date,
y0=0,
x1=end_lockdown_date,
y1=np.max(all_y),
fillcolor="LightSalmon",
opacity=0.1,
layer="below",
line_width=0,
)
elif(start_lockdown_date is not None):
fig.add_shape(
type="rect",
x0=start_lockdown_date,
y0=0,
x1=all_x[len(all_x)-1],
y1=np.max(all_y),
fillcolor="LightSalmon",
opacity=0.1,
layer="below",
line_width=0,
)
fig.update_xaxes(rangemode="nonnegative")
fig.update_yaxes(rangemode="nonnegative")
fig.update_layout(
margin=dict(l=30, r=20, t=5, b=20), # Top 0 with no title
paper_bgcolor='rgba(255,255,255,1)',
plot_bgcolor='rgba(255,255,255,1)',
showlegend= False,
autosize=False
)
if(_type == 'test_pos_p' or _type == 'mortality'):
fig.update_layout(yaxis=dict(tickformat=".2%"))
if(county ==""):
fig.write_image(folder + os.sep + state + ".png", width=350, height=350)
print("Graph for " + state + ' Cases (' + _color + ') created')
elif('for_a_state' in county):
tmp = county.split('|')[1]
fig.write_image(folder + os.sep + tmp + ".png", width=350, height=350)
print("Graph for " + state + ' ' + tmp + ' created')
else:
fig.write_image(folder + os.sep + county + ".png", width=350, height=350)
print("Graph for " + county + ", " + state + ' Cases (' + _color + ') created')
if(large is True):
# We also save the larger version of the graph
fig.update_layout(
margin=dict(l=30, r=20, t=0, b=20), # Top 0 with no title
paper_bgcolor='rgba(255,255,255,1)',
plot_bgcolor='rgba(255,255,255,1)',
showlegend= False,
)
if(county ==""):
fig.write_image(folder + os.sep + state + "_lg.png", width=1084, height=450)
print("Graph for " + state + ' Cases (' + _color + ') Larger Version created')
elif('for_a_state' in county):
tmp = county.split('|')[1]
fig.write_image(folder + os.sep + tmp + "_lg.png", width=1084, height=450)
print("Graph for " + state + ' ' + tmp + ' Larger Version created')
else:
fig.write_image(folder + os.sep + county + "_lg.png", width=1084, height=450)
print("Graph for " + county + ", " + state + ' Cases (' + _color + ') Larger Version created')
def main_menu():
print("---------------")
print(" Enter the attributes of the graph ")
print("---------------")
state = input("State Code (ex: AK): ")
_color = input("Color ('r' for red ,'g' for green, 'o' for orange, 'b' for black):")
generate_graph_with_avg(state, 'cases', _color, PATH_TO_STATES_FOLDER + os.sep + state, '')
if __name__ == "__main__":
    # Ad-hoc driver: regenerates the Texas mortality graphs (standard and
    # large variants). Earlier one-off invocations are kept commented out
    # below for reference.
    #os.system("clear")
    #main_menu()
    #generate_graph_with_avg("FL", 'test_pos_p', "r", PATH_TO_STATES_FOLDER + os.sep + "FL" + os.sep , 'for_a_state|test_pos_p')
    #generate_graph_with_avg("FL", 'test_pos_p', "r", PATH_TO_STATES_FOLDER + os.sep + "FL" + os.sep , 'for_a_state|test_pos_p' , True)
    # NOTE(review): 'mortaliy' looks like a typo for 'mortality'; the token
    # after '|' becomes part of the output filename -- confirm intended
    # before fixing, since downstream consumers may expect the current name.
    generate_graph_with_avg("TX", 'mortality', "r", PATH_TO_STATES_FOLDER + os.sep + "TX" + os.sep , 'for_a_state|mortaliy')
    generate_graph_with_avg("TX", 'mortality', "r", PATH_TO_STATES_FOLDER + os.sep + "TX" + os.sep , 'for_a_state|mortaliy', True)
    #generate_graph_with_avg("CA", 'mortality', "r", PATH_TO_STATES_FOLDER + os.sep + "CA" + os.sep , 'for_a_state|mortaliy', True)
    #generate_large_graph_with_avg("CA", "r", PATH_TO_STATES_FOLDER + os.sep + "CA" + os.sep)
    #generate_dual_graph_X_and_cases("FL", "r", PATH_TO_STATES_FOLDER + os.sep + "FL" ,
    #                                {'_type':'test','name':'Tests','raw_line':True},
    #                                {'_type':'cases','name':'Cases','avg':[7,3], 'raw_line':False},
    #                                output_ext = '_blg.png', # Just for Mike
    #                                large=True)
    #generate_dual_graph_X_and_cases("CA", "r", PATH_TO_STATES_FOLDER + os.sep + "CA" + os.sep,
    #                                {'_type':'test','name':'7D. Avg Tests','avg':[7],'raw_line':False},
    #                                {'_type':'cases','name':'7D. Avg Cases','avg':[7],'raw_line':False},
    #                                output_ext = '_tac.png', # TAC = test & case
    #                                large=False)
    # generate_dual_graph_X_and_cases("CA", "r", PATH_TO_STATES_FOLDER + os.sep + "CA",
    #                                {'_type':'act_hosp','name':'7D. Avg Hospitalization','avg':[7],'raw_line':False},
    #                                {'_type':'cases','name':'7D. Avg Cases','avg':[7],'raw_line':False},
    #                                output_ext = '_hospi_and_cases.png',
    #                                large=False)
"plotly.graph_objects.Bar",
"numpy.mean",
"csv.DictReader",
"plotly.subplots.make_subplots",
"numpy.max",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Scatter",
"json.load"
] | [((3526, 3550), 'json.load', 'json.load', (['cur_json_file'], {}), '(cur_json_file)\n', (3535, 3550), False, 'import json\n'), ((4274, 4320), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}]]"}), "(specs=[[{'secondary_y': True}]])\n", (4287, 4320), False, 'from plotly.subplots import make_subplots\n'), ((7332, 7357), 'csv.DictReader', 'csv.DictReader', (['key_dates'], {}), '(key_dates)\n', (7346, 7357), False, 'import csv\n'), ((10522, 10546), 'json.load', 'json.load', (['cur_json_file'], {}), '(cur_json_file)\n', (10531, 10546), False, 'import json\n'), ((10672, 10697), 'csv.DictReader', 'csv.DictReader', (['key_dates'], {}), '(key_dates)\n', (10686, 10697), False, 'import csv\n'), ((12839, 12850), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (12848, 12850), True, 'import plotly.graph_objects as go\n'), ((2015, 2026), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (2024, 2026), True, 'import plotly.graph_objects as go\n'), ((4470, 4561), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'all_x2', 'y': 'all_y2', 'marker_color': '"""rgba(158,158,158,.4)"""', 'name': "dataset2['name']"}), "(x=all_x2, y=all_y2, marker_color='rgba(158,158,158,.4)', name=\n dataset2['name'])\n", (4476, 4561), True, 'import plotly.graph_objects as go\n'), ((6522, 6536), 'numpy.max', 'np.max', (['all_y1'], {}), '(all_y1)\n', (6528, 6536), True, 'import numpy as np\n'), ((12868, 12929), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'all_x', 'y': 'all_y', 'marker_color': '"""rgba(158,158,158,.4)"""'}), "(x=all_x, y=all_y, marker_color='rgba(158,158,158,.4)')\n", (12874, 12929), True, 'import plotly.graph_objects as go\n'), ((12948, 13005), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'all_x_avg', 'y': 'all_y_avg', 'marker_color': '_color'}), '(x=all_x_avg, y=all_y_avg, marker_color=_color)\n', (12958, 13005), True, 'import plotly.graph_objects as go\n'), ((2047, 2108), 'plotly.graph_objects.Bar', 
'go.Bar', ([], {'x': 'all_x', 'y': 'all_y', 'marker_color': '"""rgba(158,158,158,.4)"""'}), "(x=all_x, y=all_y, marker_color='rgba(158,158,158,.4)')\n", (2053, 2108), True, 'import plotly.graph_objects as go\n'), ((2131, 2188), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'all_x_avg', 'y': 'all_y_avg', 'marker_color': '_color'}), '(x=all_x_avg, y=all_y_avg, marker_color=_color)\n', (2141, 2188), True, 'import plotly.graph_objects as go\n'), ((6617, 6642), 'numpy.max', 'np.max', (['maxY_avg_dataset1'], {}), '(maxY_avg_dataset1)\n', (6623, 6642), True, 'import numpy as np\n'), ((12628, 12654), 'numpy.mean', 'np.mean', (['tempValFormax_day'], {}), '(tempValFormax_day)\n', (12635, 12654), True, 'import numpy as np\n'), ((5036, 5048), 'numpy.max', 'np.max', (['avgy'], {}), '(avgy)\n', (5042, 5048), True, 'import numpy as np\n'), ((5636, 5648), 'numpy.max', 'np.max', (['avgy'], {}), '(avgy)\n', (5642, 5648), True, 'import numpy as np\n'), ((6004, 6018), 'numpy.max', 'np.max', (['all_y1'], {}), '(all_y1)\n', (6010, 6018), True, 'import numpy as np\n'), ((6310, 6324), 'numpy.max', 'np.max', (['all_y2'], {}), '(all_y2)\n', (6316, 6324), True, 'import numpy as np\n'), ((6723, 6737), 'numpy.max', 'np.max', (['all_y1'], {}), '(all_y1)\n', (6729, 6737), True, 'import numpy as np\n'), ((6761, 6786), 'numpy.max', 'np.max', (['maxY_avg_dataset1'], {}), '(maxY_avg_dataset1)\n', (6767, 6786), True, 'import numpy as np\n'), ((13259, 13272), 'numpy.max', 'np.max', (['all_y'], {}), '(all_y)\n', (13265, 13272), True, 'import numpy as np\n'), ((13730, 13743), 'numpy.max', 'np.max', (['all_y'], {}), '(all_y)\n', (13736, 13743), True, 'import numpy as np\n'), ((1710, 1736), 'numpy.mean', 'np.mean', (['tempValFormax_day'], {}), '(tempValFormax_day)\n', (1717, 1736), True, 'import numpy as np\n'), ((2450, 2463), 'numpy.max', 'np.max', (['all_y'], {}), '(all_y)\n', (2456, 2463), True, 'import numpy as np\n'), ((14085, 14098), 'numpy.max', 'np.max', (['all_y'], {}), '(all_y)\n', 
(14091, 14098), True, 'import numpy as np\n')] |
import requests
import numpy as np
import pandas as pd
from src.scraping.InCroatia import *
# Randomly samples coordinates inside Croatia's bounding box and records,
# for each point that lies in Croatia, whether Google Street View imagery
# exists there. Results are appended to SAVE_PATH and checkpointed
# periodically; the loop runs until interrupted.
# df = pd.DataFrame(columns=['latitude', 'longitude'])
# df.to_csv('../../data/external/valid_lat_long.csv', index=False)
GOOGLE_API_KEY = ""  # NOTE(review): must be filled in with a valid API key
SAVE_PATH = '../../data/external/valid_lat_long.csv'
df = pd.read_csv(SAVE_PATH)
df.to_csv(SAVE_PATH, index=False)
# Coordinate pairs already tested, so restarts do not re-query the API.
already_checked = {(row.latitude, row.longitude) for idx, row in df.iterrows()}
n_iter = 0
n_success = 0
InCroatiaChecker = InCroatia()
# Bounding box around Croatia (left, bottom, right, top) -- loop invariant,
# hoisted out of the sampling loop.
bounding_box = [13.3569755388, 42.07999136, 19.1904757016, 46.55]
while True:
    n_iter += 1
    lat = round(np.random.uniform(bounding_box[1], bounding_box[3]), 6)
    lng = round(np.random.uniform(bounding_box[0], bounding_box[2]), 6)
    if (lat, lng) in already_checked:
        continue
    if not InCroatiaChecker.check(lat=lat, long=lng):
        continue
    # BUG FIX: set.update() iterates its argument, so update((lat, lng))
    # added the two floats individually and never recorded the pair --
    # duplicates were therefore re-queried. add() stores the tuple itself.
    already_checked.add((lat, lng))
    URL = f"https://maps.googleapis.com/maps/api/streetview/metadata?&location={lat},{lng}&key={GOOGLE_API_KEY}"
    response = requests.get(URL).json()['status']
    success = 0
    if response == "OK":
        success = 1
        n_success += 1
        print(
            round(df['success'].mean(), 4),
            f"{int(df['success'].sum())}/{len(df)}"
        )
    df.loc[len(df.index)] = [lat, lng, success]
    if n_iter % 20 == 19:  # periodic checkpoint to disk
        df.to_csv(SAVE_PATH, index=False)
    if n_iter % 100 == 99:  # occasional progress marker
        print(URL)
| [
"requests.get",
"pandas.read_csv",
"numpy.random.uniform"
] | [((296, 318), 'pandas.read_csv', 'pd.read_csv', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (307, 318), True, 'import pandas as pd\n'), ((636, 687), 'numpy.random.uniform', 'np.random.uniform', (['bounding_box[1]', 'bounding_box[3]'], {}), '(bounding_box[1], bounding_box[3])\n', (653, 687), True, 'import numpy as np\n'), ((708, 759), 'numpy.random.uniform', 'np.random.uniform', (['bounding_box[0]', 'bounding_box[2]'], {}), '(bounding_box[0], bounding_box[2])\n', (725, 759), True, 'import numpy as np\n'), ((1061, 1078), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (1073, 1078), False, 'import requests\n')] |
''' Functions relating to HDR mode '''
import numpy as np
import astropy.units as u
import astropy.constants as cr
from astropy.modeling.blackbody import FLAM
from astropy.convolution import convolve_fft, Gaussian2DKernel
# Set telescope details
epd = 75 * u.cm  # entrance pupil diameter
area = np.pi * (0.5*epd)**2  # collecting area computed from the pupil diameter
reflectivity = 0.9  # reflectivity applied once per mirror
mirrors = 3  # number of reflections in the optical path
qe = 0.4 * u.electron / u.ph  # detector quantum efficiency
# Band-specific details
band_wav = {'nuv': [200, 300] * u.nm, 'fuv': [145, 175] * u.nm}  # bandpass edges
sky_bgd_rate = {'nuv': 1.0 * u.ph / u.s, 'fuv': 0.015 * u.ph / u.s}  # per pixel
dichroic = {'nuv': 0.75, 'fuv': 0.5}  # dichroic throughput per band
# Set some detector details
psf_pix = 4
dark_current = 0.01 * u.electron / u.s  # presumably per pixel -- used additively per pixel below
pixel_size = 1 * u.arcsec
# Gain-mode-specific details (keys are the gain_mode strings used throughout)
gain = {'high': 1.2 * u.adu / u.electron, 'low': 10.1 * u.adu / u.electron}
read_noise = {'high': 2 * u.electron, 'low': 10 * u.electron}
well_depth = {'high': 25000 * u.electron, 'low': 190000 * u.electron}  # saturation level
# PSF details
psf_fwhm = 2 * u.arcsec
def magnitude_to_count_rate(magnitudes, band='fuv'):
    ''' Convert (flat) input magnitudes to count rates
    Parameters
    ----------
    magnitudes : float array
        Array of magnitudes in ABmag units
    band : string
        Which UVEX band to use, options are 'nuv' and 'fuv'
        Defaults to 'fuv'
    Returns
    -------
    count_rate : float array
        Array of count rates in ph/s units
    '''
    wav = np.arange(1000,5000) * u.AA # Wavelength scale in 1 Angstrom steps
    dw = 1 * u.AA  # grid spacing used as the integration element
    # Energy carried by a single photon at each grid wavelength (E = hc/lambda)
    ph_energy = (cr.h.cgs * cr.c.cgs / wav.cgs) / u.ph
    count_rate = np.zeros(len(magnitudes)) * u.ph / u.s
    for i, m in enumerate(magnitudes):
        # Convert to flux density (flat spectrum assumed via spectral_density)
        flux_den = m.to(FLAM, equivalencies=u.spectral_density(wav))
        ph_flux = flux_den * dw / ph_energy
        # In-band rate: sum photon flux over this band's wavelength window
        fluence = ph_flux[(wav >= band_wav[band][0].to(u.AA)) & (wav <= band_wav[band][1].to(u.AA))].sum()
        # Scale by collecting area, per-mirror losses and dichroic throughput
        count_rate[i] = fluence * area * (reflectivity**mirrors) * dichroic[band]
    return count_rate
def count_rate_to_electron_rate(count_rate):
    ''' Convert a photon count rate to an electron rate via the quantum efficiency

    Fixed docstring: the previous version documented `exp_time` and
    `gain_mode` parameters that this function does not take (copy-paste
    from count_rate_to_electrons).

    Parameters
    ----------
    count_rate : float array
        Array of count rates in photons per second
    Returns
    -------
    e : float array
        Array of electron rates (electrons per second), same shape as count_rate
    '''
    # No quantum yield consideration at this time
    return count_rate * qe
def count_rate_to_electrons(count_rate, exp_time, gain_mode='high'):
    ''' Accumulate photons into electrons over an exposure, with saturation.

    Returns the ideal (noise-free, unquantized) number of electrons; pixels
    whose total charge including dark current would exceed the well depth
    are clipped to the saturation level and flagged.

    Parameters
    ----------
    count_rate : float array
        Array of count rates in photons per second
    exp_time: float
        Exposure time in seconds
    gain_mode : string
        Which detector gain mode to use, options are 'low' and 'high'
        Defaults to 'high'
    Returns
    -------
    electrons : float array
        Electrons measured, same shape as count_rate
    saturated : bool array
        True where the pixel saturated, same shape as count_rate
    '''
    # No quantum yield consideration at this time
    electrons = count_rate * qe * exp_time
    full_well = well_depth[gain_mode]
    # Dark current counts toward the well depth even though it is not
    # included in the returned signal.
    saturated = (electrons + dark_current * exp_time) > full_well
    electrons[saturated] = full_well
    return electrons, saturated
def get_signal(count_rate, exp_times, gain_mode='high'):
    ''' Convert count rates into detector signal (ADU) for several exposures.

    Parameters
    ----------
    count_rate : float array
        Array of count rates in photons per second
    exp_times: float array
        Exposure times in seconds
    gain_mode : string
        Which detector gain mode to use, options are 'low' and 'high'
        Defaults to 'high'
    Returns
    -------
    signal : 2-D float array
        Detector signal in ADU with shape len(exp_times) x len(count_rate)
    saturated : 2-D bool array
        True where a pixel saturated, shape len(exp_times) x len(count_rate)
    '''
    # One (electrons, saturation-mask) pair per exposure time.
    exposures = [count_rate_to_electrons(count_rate, t, gain_mode=gain_mode)
                 for t in exp_times]
    adu = [electrons * gain[gain_mode] for electrons, _ in exposures]
    masks = [mask for _, mask in exposures]
    return np.array(adu), np.array(masks)
def get_snr(count_rate, exp_times, band='fuv', gain_mode='high'):
    ''' Compute the signal-to-noise ratio per pixel for several exposures.

    Noise model: Poisson (shot) noise from source + sky, plus read noise in
    quadrature. Saturated pixels are assigned an SNR of 0.

    Parameters
    ----------
    count_rate : float array
        Array of count rates in photons per second
    exp_times: float array
        Array of exposure times in seconds
    band : string
        Which UVEX band to use, options are 'nuv' and 'fuv'
        Defaults to 'fuv'
    gain_mode : string
        Which detector gain mode to use, options are 'low' and 'high'
        Defaults to 'high'
    Returns
    -------
    snr : 2-D float array
        Array of SNR with shape len(exp_times) x len(count_rate)
    '''
    read = read_noise[gain_mode]
    results = []
    for exposure in exp_times:
        # Electrons from the source and from the sky background.
        source_e, clipped = count_rate_to_electrons(count_rate, exposure,
                                                    gain_mode=gain_mode)
        sky_e, _ = count_rate_to_electrons(sky_bgd_rate[band], exposure,
                                           gain_mode=gain_mode)
        # Shot noise; units are re-attached after the sqrt.
        shot = np.sqrt(source_e + sky_e).value * u.electron
        ratio = source_e / np.sqrt(shot**2 + read**2)
        ratio[clipped] = 0  # saturated pixels carry no usable signal
        results.append(ratio)
    # Also add Fano noise in quad with these when implementing quantum yield
    return np.array(results)
def perform_hdr_simple(pixels, saturated, exp_times):
    ''' Combine per-exposure pixel values into a single HDR array.

    Each pixel takes its value from the longest exposure in which it is not
    saturated; pixels saturated in every exposure remain 0.

    Parameters
    ----------
    pixels : 2-D float array
        Pixel values (signal or SNR), first axis matching exp_times
    saturated: bool array
        True where a pixel is saturated, same shape as pixels
    exp_times: float array
        Exposure times in seconds, one per row of pixels
    Returns
    -------
    hdr_pixels : float array
        1-D array of selected pixel values, length pixels.shape[1]
    '''
    longest_first = np.argsort(-exp_times)
    combined = np.zeros(pixels.shape[1])
    # Scalar True broadcasts: every pixel starts out unassigned.
    was_saturated = True
    for exp_idx in longest_first:
        now_saturated = saturated[exp_idx]
        # Pixels that were saturated in all longer exposures but are clean
        # here get their value from this exposure.
        newly_usable = was_saturated & ~now_saturated
        combined[newly_usable] = pixels[exp_idx][newly_usable]
        was_saturated = now_saturated
    return combined
def create_image(im_frame_size,exp_time,sources=[],band='fuv',gain_mode='high'):
    ''' Creates an image from an exposure, with given sources
    Parameters
    ----------
    im_frame_size : int
        Size of the resulting image in pixels
    exp_time: float
        Exposure time in seconds
    sources: QTable object
        QTable of sources in format (x_pos, y_pos, count_rate)
        x_pos and y_pos are floats, fractional positions between 0 and 1
        count_rate is in photons per second
    band : string
        Which UVEX band to use, options are 'nuv' and 'fuv'
        Defaults to 'fuv'
    gain_mode : string
        Which detector gain mode to use, options are 'low' and 'high'
        Defaults to 'high'
    Returns
    -------
    im_adu : 2-D array
        Simulated detector image in ADU, shape (im_frame_size, im_frame_size)
    '''
    # NOTE(review): the mutable default `sources=[]` is shared across calls;
    # it is only read here, but prefer `sources=None` if it is ever mutated.
    # Initialise oversampling
    oversample = 6
    src_frame_size = 25 # In pixels
    pixel_size_init = pixel_size / oversample
    src_frame_size_init = src_frame_size * oversample
    im_frame_size_init = im_frame_size * oversample
    # Create empty oversampled image
    im_array = np.zeros([im_frame_size_init,im_frame_size_init]) * u.ph / u.s
    # Create PSF kernel
    psf_kernel = Gaussian2DKernel(psf_fwhm / pixel_size_init,
                                 x_size=src_frame_size_init, y_size=src_frame_size_init)
    psf_array = psf_kernel.array
    # Add sources
    if len(sources) > 0:
        source_inv = np.array([sources['y_pos'],sources['x_pos']]) # Create array of all ys and all xs
        # Fractional (0..1) positions -> oversampled pixel indices
        source_pix = (source_inv.transpose() * np.array(im_array.shape)).transpose().astype(int)
        im_array[tuple(source_pix)] += sources['src_cr']
    # Now convolve with the PSF
    im_psf = convolve_fft(im_array.value, psf_kernel) * im_array.unit
    # Bin up the image by oversample parameter to the correct pixel size
    shape = (im_frame_size, oversample, im_frame_size, oversample)
    im_binned = im_psf.reshape(shape).sum(-1).sum(1)
    im_binned[im_binned < 0] = 0  # FFT convolution ringing can dip below zero
    # Convert to observed source counts
    im_counts = im_binned * exp_time
    im_sky = np.ones(im_counts.shape) * sky_bgd_rate[band] * exp_time
    # Observe! Includes sky rate and dark current
    im_poisson = (np.random.poisson(im_counts.value) + np.random.poisson(im_sky.value)) * im_counts.unit
    # Read! Convert to electrons, apply saturation and read noise
    # No quantum yield consideration at this time
    im_read = im_poisson * qe + dark_current * exp_time
    im_read[im_read > well_depth[gain_mode]] = well_depth[gain_mode]
    im_read += np.random.normal(loc=0, scale=read_noise[gain_mode].value,
                             size=im_read.shape) * im_read.unit
    im_read = np.floor(im_read)  # quantize to whole electrons
    im_read[im_read < 0] = 0  # read noise can push values negative
    # Finally, convert to ADU
    im_adu = im_read * gain[gain_mode]
    return im_adu
| [
"numpy.random.normal",
"numpy.sqrt",
"numpy.ones",
"numpy.random.poisson",
"numpy.floor",
"astropy.units.spectral_density",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"astropy.convolution.convolve_fft",
"astropy.convolution.Gaussian2DKernel",
"numpy.arange"
] | [((5544, 5562), 'numpy.array', 'np.array', (['snr_list'], {}), '(snr_list)\n', (5552, 5562), True, 'import numpy as np\n'), ((6270, 6292), 'numpy.argsort', 'np.argsort', (['(-exp_times)'], {}), '(-exp_times)\n', (6280, 6292), True, 'import numpy as np\n'), ((6309, 6334), 'numpy.zeros', 'np.zeros', (['pixels.shape[1]'], {}), '(pixels.shape[1])\n', (6317, 6334), True, 'import numpy as np\n'), ((7546, 7650), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['(psf_fwhm / pixel_size_init)'], {'x_size': 'src_frame_size_init', 'y_size': 'src_frame_size_init'}), '(psf_fwhm / pixel_size_init, x_size=src_frame_size_init,\n y_size=src_frame_size_init)\n', (7562, 7650), False, 'from astropy.convolution import convolve_fft, Gaussian2DKernel\n'), ((9047, 9064), 'numpy.floor', 'np.floor', (['im_read'], {}), '(im_read)\n', (9055, 9064), True, 'import numpy as np\n'), ((1323, 1344), 'numpy.arange', 'np.arange', (['(1000)', '(5000)'], {}), '(1000, 5000)\n', (1332, 1344), True, 'import numpy as np\n'), ((4312, 4329), 'numpy.array', 'np.array', (['signals'], {}), '(signals)\n', (4320, 4329), True, 'import numpy as np\n'), ((4331, 4344), 'numpy.array', 'np.array', (['sat'], {}), '(sat)\n', (4339, 4344), True, 'import numpy as np\n'), ((7779, 7825), 'numpy.array', 'np.array', (["[sources['y_pos'], sources['x_pos']]"], {}), "([sources['y_pos'], sources['x_pos']])\n", (7787, 7825), True, 'import numpy as np\n'), ((8061, 8101), 'astropy.convolution.convolve_fft', 'convolve_fft', (['im_array.value', 'psf_kernel'], {}), '(im_array.value, psf_kernel)\n', (8073, 8101), False, 'from astropy.convolution import convolve_fft, Gaussian2DKernel\n'), ((8906, 8984), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'read_noise[gain_mode].value', 'size': 'im_read.shape'}), '(loc=0, scale=read_noise[gain_mode].value, size=im_read.shape)\n', (8922, 8984), True, 'import numpy as np\n'), ((5361, 5414), 'numpy.sqrt', 'np.sqrt', (['(shot_noise ** 2 + 
read_noise[gain_mode] ** 2)'], {}), '(shot_noise ** 2 + read_noise[gain_mode] ** 2)\n', (5368, 5414), True, 'import numpy as np\n'), ((7441, 7491), 'numpy.zeros', 'np.zeros', (['[im_frame_size_init, im_frame_size_init]'], {}), '([im_frame_size_init, im_frame_size_init])\n', (7449, 7491), True, 'import numpy as np\n'), ((8436, 8460), 'numpy.ones', 'np.ones', (['im_counts.shape'], {}), '(im_counts.shape)\n', (8443, 8460), True, 'import numpy as np\n'), ((8562, 8596), 'numpy.random.poisson', 'np.random.poisson', (['im_counts.value'], {}), '(im_counts.value)\n', (8579, 8596), True, 'import numpy as np\n'), ((8599, 8630), 'numpy.random.poisson', 'np.random.poisson', (['im_sky.value'], {}), '(im_sky.value)\n', (8616, 8630), True, 'import numpy as np\n'), ((1615, 1638), 'astropy.units.spectral_density', 'u.spectral_density', (['wav'], {}), '(wav)\n', (1633, 1638), True, 'import astropy.units as u\n'), ((5285, 5310), 'numpy.sqrt', 'np.sqrt', (['(signal + sky_bgd)'], {}), '(signal + sky_bgd)\n', (5292, 5310), True, 'import numpy as np\n'), ((7908, 7932), 'numpy.array', 'np.array', (['im_array.shape'], {}), '(im_array.shape)\n', (7916, 7932), True, 'import numpy as np\n')] |
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import cv2
import os
def covidTest(filePath):
    """Train a small CNN on the bundled X-ray dataset, then classify an image.

    Parameters
    ----------
    filePath : str
        Directory containing the image(s) to classify.

    Returns
    -------
    str
        'Covid Positive' or 'Covid Negative'.

    Notes
    -----
    NOTE(review): the function returns inside the directory loop, so only
    the FIRST image found in `filePath` is ever classified -- confirm intended.
    NOTE(review): the model is retrained from scratch on every call; the
    weights should presumably be trained once and reused.
    """
    # Rescale pixel values from [0, 255] to [0, 1]
    train = ImageDataGenerator(rescale= 1/255)
    validation = ImageDataGenerator(rescale= 1/255)
    train_dataset = train.flow_from_directory('Images/train_dataset', target_size= (200,200), batch_size = 3, class_mode = 'binary')
    validation_dataset = validation.flow_from_directory('Images/validation', target_size= (200,200), batch_size = 3, class_mode = 'binary')
    # Small CNN: two conv/pool stages, then a dense head with a single
    # sigmoid output (binary classification).
    model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16,(3,3), activation = 'relu', input_shape = (200,200,3)),
    tf.keras.layers.MaxPool2D(2,2),
    tf.keras.layers.Conv2D(32,(3,3), activation = 'relu'), tf.keras.layers.MaxPool2D(2,2),
    tf.keras.layers.Flatten(), tf.keras.layers.Dense(512,activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')])
    # NOTE(review): `lr` is deprecated in newer Keras in favor of
    # `learning_rate` -- confirm the installed TF version accepts it.
    model.compile(loss='binary_crossentropy', optimizer = RMSprop(lr=0.001), metrics = ['accuracy'])
    model_fit=model.fit(train_dataset, steps_per_epoch = 3, epochs= 10, validation_data = validation_dataset)
    print(validation_dataset.class_indices)
    #Visualize the models accuracy
    plt.plot(model_fit.history['accuracy'])
    plt.plot(model_fit.history['val_accuracy'])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', ' Val'], loc='upper left')
    plt.savefig('plotimage/accuracy.png')
    plt.show()
    #Visualize the models loss
    plt.plot(model_fit.history['loss'])
    plt.plot(model_fit.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', ' Val'], loc='upper right')
    plt.show()
    dir_path = filePath
    for i in os.listdir(dir_path):
        img = image.load_img(dir_path+ '//' + i, target_size=(200,200))
        print(i)
        plt.imshow(img)
        plt.show()
        X = image.img_to_array(img)
        X = np.expand_dims(X,axis =0)
        images =np.vstack([X])
        val = model.predict(images)
        # val == 0 is treated as covid-positive here; verify against the
        # class_indices mapping printed above.
        if val == 0:
            return str('Covid Positive')
        else:
            return str('Covid Negative')
| [
"matplotlib.pyplot.ylabel",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.imshow",
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.vstack",
"tensorflow.keras.preprocessing.... | [((300, 335), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255)'}), '(rescale=1 / 255)\n', (318, 335), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((352, 387), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255)'}), '(rescale=1 / 255)\n', (370, 387), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1345, 1384), 'matplotlib.pyplot.plot', 'plt.plot', (["model_fit.history['accuracy']"], {}), "(model_fit.history['accuracy'])\n", (1353, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1432), 'matplotlib.pyplot.plot', 'plt.plot', (["model_fit.history['val_accuracy']"], {}), "(model_fit.history['val_accuracy'])\n", (1397, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1464), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Accuracy"""'], {}), "('Model Accuracy')\n", (1446, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1491), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1479, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1515), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1506, 1515), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1567), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', ' Val']"], {'loc': '"""upper left"""'}), "(['Train', ' Val'], loc='upper left')\n", (1530, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1572, 1609), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plotimage/accuracy.png"""'], {}), "('plotimage/accuracy.png')\n", (1583, 1609), True, 'import matplotlib.pyplot as plt\n'), ((1614, 1624), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1622, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1663, 1698), 'matplotlib.pyplot.plot', 'plt.plot', 
(["model_fit.history['loss']"], {}), "(model_fit.history['loss'])\n", (1671, 1698), True, 'import matplotlib.pyplot as plt\n'), ((1703, 1742), 'matplotlib.pyplot.plot', 'plt.plot', (["model_fit.history['val_loss']"], {}), "(model_fit.history['val_loss'])\n", (1711, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1770), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Loss"""'], {}), "('Model Loss')\n", (1756, 1770), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1793), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (1785, 1793), True, 'import matplotlib.pyplot as plt\n'), ((1798, 1817), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1808, 1817), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1870), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', ' Val']"], {'loc': '"""upper right"""'}), "(['Train', ' Val'], loc='upper right')\n", (1832, 1870), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1950), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (1940, 1950), False, 'import os\n'), ((1966, 2025), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (["(dir_path + '//' + i)"], {'target_size': '(200, 200)'}), "(dir_path + '//' + i, target_size=(200, 200))\n", (1980, 2025), False, 'from tensorflow.keras.preprocessing import image\n'), ((2049, 2064), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2059, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2081, 2083), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2120), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2115, 2120), False, 'from tensorflow.keras.preprocessing import image\n'), ((2133, 2158), 
'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2147, 2158), True, 'import numpy as np\n'), ((2175, 2189), 'numpy.vstack', 'np.vstack', (['[X]'], {}), '([X])\n', (2184, 2189), True, 'import numpy as np\n'), ((703, 788), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(200, 200, 3)'}), "(16, (3, 3), activation='relu', input_shape=(200, 200, 3)\n )\n", (725, 788), True, 'import tensorflow as tf\n'), ((790, 821), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', (['(2)', '(2)'], {}), '(2, 2)\n', (815, 821), True, 'import tensorflow as tf\n'), ((827, 880), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (849, 880), True, 'import tensorflow as tf\n'), ((882, 913), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', (['(2)', '(2)'], {}), '(2, 2)\n', (907, 913), True, 'import tensorflow as tf\n'), ((919, 944), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (942, 944), True, 'import tensorflow as tf\n'), ((946, 991), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (967, 991), True, 'import tensorflow as tf\n'), ((997, 1043), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1018, 1043), True, 'import tensorflow as tf\n'), ((1105, 1122), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (1112, 1122), False, 'from tensorflow.keras.optimizers import RMSprop\n')] |
#!/bin/python3
import sys
from collections import defaultdict
from skimage import io, util
import numpy as np
import math
import random
def distance(pixel1, pixel2):
    """Euclidean distance between two pixels using their first three
    (RGB) channels; values are cast to float before subtracting."""
    delta_r = float(pixel1[0]) - float(pixel2[0])
    delta_g = float(pixel1[1]) - float(pixel2[1])
    delta_b = float(pixel1[2]) - float(pixel2[2])
    return math.sqrt(delta_r**2 + delta_g**2 + delta_b**2)
# Diagonal of the RGB cube: the largest possible pixel distance.
MAX_DISTANCE = distance((0, 0, 0), (255, 255, 255))
def norm_distance(pixel1, pixel2):
    """Pixel distance scaled into [0, 1] by the maximum RGB distance."""
    return distance(pixel1, pixel2) / MAX_DISTANCE
def simplify(image, iterations):
    """Return a simplified copy of *image* after *iterations* migration passes.

    Each pass lets every pixel migrate its color to similar neighbors (see
    migrate); each destination pixel is then set to one of the colors that
    arrived there, chosen at random.
    """
    result = np.array(image)
    # Colors gathered per destination pixel. NOTE: kept from the original
    # behavior -- this is deliberately NOT cleared between passes, so
    # earlier passes keep contributing candidates.
    gathered = defaultdict(list)
    for _ in range(iterations):
        # np.ndindex iterates row-major, matching nested x/y loops.
        for px, py in np.ndindex(image.shape[0], image.shape[1]):
            for tx, ty in migrate(image, (px, py)):
                gathered[(tx, ty)].append(image[px, py])
        for (tx, ty), colors in gathered.items():
            result[tx, ty] = colors[random.randint(0, len(colors) - 1)]
    return result
def migrate(image, idx):
    """Return the list of neighbor coordinates pixel *idx* migrates to.

    Each of the 8 in-bounds neighbors is selected with probability
    1 - norm_distance(neighbor, pixel): similar colors migrate more often.
    """
    x, y = idx
    height, width = image.shape[0], image.shape[1]
    destinations = []
    # Same neighbor order as before: row above, same row, row below.
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            nx, ny = x + dx, y + dy
            if not (0 <= nx < height and 0 <= ny < width):
                continue
            gap = norm_distance(image[nx, ny], image[x, y])
            if random.random() >= gap:
                destinations.append((nx, ny))
    return destinations
if __name__ == "__main__":
    # CLI entry point: simplify.py <image> <iterations>
    if len(sys.argv) != 3:
        print("Usage: Enter name of a single image and the # of iterations to apply.")
        sys.exit()
    random.seed()  # seed from system entropy (non-reproducible runs)
    imname = sys.argv[1]
    iterations = int(sys.argv[2])
    image = io.imread(imname)
    image_transformed = simplify(image, iterations)
    # NOTE(review): appending "_simplified" after the original extension
    # leaves the output name without a recognized image extension, so
    # io.imsave may fail to infer the format -- confirm intended.
    io.imsave(imname + "_simplified", image_transformed)
| [
"random.seed",
"numpy.array",
"skimage.io.imread",
"collections.defaultdict",
"skimage.io.imsave",
"sys.exit",
"random.random"
] | [((547, 562), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (555, 562), True, 'import numpy as np\n'), ((585, 602), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (596, 602), False, 'from collections import defaultdict\n'), ((1654, 1667), 'random.seed', 'random.seed', ([], {}), '()\n', (1665, 1667), False, 'import random\n'), ((1741, 1758), 'skimage.io.imread', 'io.imread', (['imname'], {}), '(imname)\n', (1750, 1758), False, 'from skimage import io, util\n'), ((1817, 1869), 'skimage.io.imsave', 'io.imsave', (["(imname + '_simplified')", 'image_transformed'], {}), "(imname + '_simplified', image_transformed)\n", (1826, 1869), False, 'from skimage import io, util\n'), ((1638, 1648), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1646, 1648), False, 'import sys\n'), ((1377, 1392), 'random.random', 'random.random', ([], {}), '()\n', (1390, 1392), False, 'import random\n')] |
from __future__ import print_function
import numpy as np
import itertools
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
from pytest import warns as assert_warns
from scipy.spatial import SphericalVoronoi, distance
from scipy.spatial import _spherical_voronoi as spherical_voronoi
from scipy.spatial.transform import Rotation
from scipy.optimize import linear_sum_assignment
TOL = 1E-10
class TestSphericalVoronoi(object):
    def setup_method(self):
        """Build the generator point sets shared by the tests."""
        # Eight generators spread over the unit sphere (general position).
        self.points = np.array([
            [-0.78928481, -0.16341094, 0.59188373],
            [-0.66839141, 0.73309634, 0.12578818],
            [0.32535778, -0.92476944, -0.19734181],
            [-0.90177102, -0.03785291, -0.43055335],
            [0.71781344, 0.68428936, 0.12842096],
            [-0.96064876, 0.23492353, -0.14820556],
            [0.73181537, -0.22025898, -0.6449281],
            [0.79979205, 0.54555747, 0.25039913]]
        )
        # Issue #9386: generators confined to a single hemisphere.
        self.hemisphere_points = np.array([
            [0.88610999, -0.42383021, 0.18755541],
            [0.51980039, -0.72622668, 0.4498915],
            [0.56540011, -0.81629197, -0.11827989],
            [0.69659682, -0.69972598, 0.15854467]])
        # Issue #8859: a denser hemispherical grid built from spherical
        # coordinates (azimuth phi, polar angle theta).
        phi = np.linspace(0, 2 * np.pi, 10, endpoint=False)    # azimuth angle
        theta = np.linspace(0.001, np.pi * 0.4, 5)    # polar angle
        theta = theta[np.newaxis, :].T
        phiv, thetav = np.meshgrid(phi, theta)
        phiv = np.reshape(phiv, (50, 1))
        thetav = np.reshape(thetav, (50, 1))
        # Spherical -> Cartesian conversion on the unit sphere.
        x = np.cos(phiv) * np.sin(thetav)
        y = np.sin(phiv) * np.sin(thetav)
        z = np.cos(thetav)
        self.hemisphere_points2 = np.concatenate([x, y, z], axis=1)
def test_constructor(self):
center = np.array([1, 2, 3])
radius = 2
s1 = SphericalVoronoi(self.points)
# user input checks in SphericalVoronoi now require
# the radius / center to match the generators so adjust
# accordingly here
s2 = SphericalVoronoi(self.points * radius, radius)
s3 = SphericalVoronoi(self.points + center, center=center)
s4 = SphericalVoronoi(self.points * radius + center, radius, center)
assert_array_equal(s1.center, np.array([0, 0, 0]))
assert_equal(s1.radius, 1)
assert_array_equal(s2.center, np.array([0, 0, 0]))
assert_equal(s2.radius, 2)
assert_array_equal(s3.center, center)
assert_equal(s3.radius, 1)
assert_array_equal(s4.center, center)
assert_equal(s4.radius, radius)
def test_vertices_regions_translation_invariance(self):
sv_origin = SphericalVoronoi(self.points)
center = np.array([1, 1, 1])
sv_translated = SphericalVoronoi(self.points + center, center=center)
assert_equal(sv_origin.regions, sv_translated.regions)
assert_array_almost_equal(sv_origin.vertices + center,
sv_translated.vertices)
def test_vertices_regions_scaling_invariance(self):
sv_unit = SphericalVoronoi(self.points)
sv_scaled = SphericalVoronoi(self.points * 2, 2)
assert_equal(sv_unit.regions, sv_scaled.regions)
assert_array_almost_equal(sv_unit.vertices * 2,
sv_scaled.vertices)
def test_old_radius_api(self):
sv_unit = SphericalVoronoi(self.points, radius=1)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`radius` is `None`")
sv = SphericalVoronoi(self.points, None)
assert_array_almost_equal(sv_unit.vertices, sv.vertices)
def test_old_radius_api_warning(self):
with assert_warns(DeprecationWarning):
sv = SphericalVoronoi(self.points, None)
def test_sort_vertices_of_regions(self):
sv = SphericalVoronoi(self.points)
unsorted_regions = sv.regions
sv.sort_vertices_of_regions()
assert_equal(sorted(sv.regions), sorted(unsorted_regions))
def test_sort_vertices_of_regions_flattened(self):
expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1],
[4, 8, 7, 5, 6], [9, 11, 10], [2, 7, 5],
[1, 4, 8, 11, 9], [0, 3, 10, 9, 1]])
expected = list(itertools.chain(*sorted(expected)))
sv = SphericalVoronoi(self.points)
sv.sort_vertices_of_regions()
actual = list(itertools.chain(*sorted(sv.regions)))
assert_array_equal(actual, expected)
def test_sort_vertices_of_regions_dimensionality(self):
points = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0.5, 0.5, 0.5, 0.5]])
with pytest.raises(TypeError, match="three-dimensional"):
sv = spherical_voronoi.SphericalVoronoi(points)
sv.sort_vertices_of_regions()
def test_num_vertices(self):
# for any n >= 3, a spherical Voronoi diagram has 2n - 4
# vertices; this is a direct consequence of Euler's formula
# as explained by <NAME> Mamede (2010) Proceedings of the
# 2010 International Symposium on Voronoi Diagrams in Science
# and Engineering
sv = SphericalVoronoi(self.points)
expected = self.points.shape[0] * 2 - 4
actual = sv.vertices.shape[0]
assert_equal(actual, expected)
def test_voronoi_circles(self):
sv = spherical_voronoi.SphericalVoronoi(self.points)
for vertex in sv.vertices:
distances = distance.cdist(sv.points, np.array([vertex]))
closest = np.array(sorted(distances)[0:3])
assert_almost_equal(closest[0], closest[1], 7, str(vertex))
assert_almost_equal(closest[0], closest[2], 7, str(vertex))
def test_duplicate_point_handling(self):
# an exception should be raised for degenerate generators
# related to Issue# 7046
self.degenerate = np.concatenate((self.points, self.points))
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.degenerate)
def test_incorrect_radius_handling(self):
# an exception should be raised if the radius provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
radius=0.98)
def test_incorrect_center_handling(self):
# an exception should be raised if the center provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
center=[0.1, 0, 0])
def test_single_hemisphere_handling(self):
# Test solution of Issues #9386, #8859
for points in [self.hemisphere_points, self.hemisphere_points2]:
sv = SphericalVoronoi(points)
triangles = sv._tri.points[sv._tri.simplices]
dots = np.einsum('ij,ij->i', sv.vertices, triangles[:, 0])
circumradii = np.arccos(np.clip(dots, -1, 1))
assert np.max(circumradii) > np.pi / 2
def test_rank_deficient(self):
# rank-1 input cannot be triangulated
points = np.array([[-1, 0, 0], [1, 0, 0]])
with pytest.raises(ValueError, match="Rank of input points"):
sv = spherical_voronoi.SphericalVoronoi(points)
@pytest.mark.parametrize("n", [8, 15, 21])
@pytest.mark.parametrize("radius", [0.5, 1, 2])
@pytest.mark.parametrize("center", [(0, 0, 0), (1, 2, 3)])
def test_geodesic_input(self, n, radius, center):
U = Rotation.random(random_state=0).as_matrix()
thetas = np.linspace(0, 2 * np.pi, n, endpoint=False)
points = np.vstack([np.sin(thetas), np.cos(thetas), np.zeros(n)]).T
points = radius * points @ U
sv = SphericalVoronoi(points + center, radius=radius, center=center)
# each region must have 4 vertices
region_sizes = np.array([len(region) for region in sv.regions])
assert (region_sizes == 4).all()
regions = np.array(sv.regions)
# vertices are those between each pair of input points + north and
# south poles
vertices = sv.vertices - center
assert len(vertices) == n + 2
# verify that north and south poles are orthogonal to geodesic on which
# input points lie
poles = vertices[n:]
assert np.abs(np.dot(points, poles.T)).max() < 1E-10
for point, region in zip(points, sv.regions):
cosine = np.dot(vertices[region], point)
sine = np.linalg.norm(np.cross(vertices[region], point), axis=1)
arclengths = radius * np.arctan2(sine, cosine)
# test arc lengths to poles
assert_almost_equal(arclengths[[1, 3]], radius * np.pi / 2)
# test arc lengths to forward and backward neighbors
assert_almost_equal(arclengths[[0, 2]], radius * np.pi / n)
regions = sv.regions.copy()
sv.sort_vertices_of_regions()
assert regions == sv.regions
@pytest.mark.parametrize("dim", range(2, 7))
def test_higher_dimensions(self, dim):
n = 100
rng = np.random.RandomState(seed=0)
points = rng.randn(n, dim)
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
sv = SphericalVoronoi(points)
assert sv.vertices.shape[1] == dim
assert len(sv.regions) == n
# verify Euler characteristic
cell_counts = []
simplices = np.sort(sv._tri.simplices)
for i in range(1, dim + 1):
cells = []
for indices in itertools.combinations(range(dim), i):
cells.append(simplices[:, list(indices)])
cells = np.unique(np.concatenate(cells), axis=0)
cell_counts.append(len(cells))
expected_euler = 1 + (-1)**(dim-1)
actual_euler = sum([(-1)**i * e for i, e in enumerate(cell_counts)])
assert expected_euler == actual_euler
@pytest.mark.parametrize("dim", range(2, 7))
def test_cross_polytope_regions(self, dim):
# The hypercube is the dual of the cross-polytope, so the voronoi
# vertices of the cross-polytope lie on the points of the hypercube.
# generate points of the cross-polytope
points = np.concatenate((-np.eye(dim), np.eye(dim)))
sv = SphericalVoronoi(points)
assert all([len(e) == 2**(dim - 1) for e in sv.regions])
# generate points of the hypercube
expected = np.vstack(list(itertools.product([-1, 1], repeat=dim)))
expected = expected.astype(np.float) / np.sqrt(dim)
# test that Voronoi vertices are correctly placed
dist = distance.cdist(sv.vertices, expected)
res = linear_sum_assignment(dist)
assert dist[res].sum() < TOL
@pytest.mark.parametrize("dim", range(2, 4))
def test_hypercube_regions(self, dim):
# The cross-polytope is the dual of the hypercube, so the voronoi
# vertices of the hypercube lie on the points of the cross-polytope.
# generate points of the hypercube
points = np.vstack(list(itertools.product([-1, 1], repeat=dim)))
points = points.astype(np.float) / np.sqrt(dim)
sv = SphericalVoronoi(points)
# generate points of the cross-polytope
expected = np.concatenate((-np.eye(dim), np.eye(dim)))
# test that Voronoi vertices are correctly placed
dist = distance.cdist(sv.vertices, expected)
res = linear_sum_assignment(dist)
assert dist[res].sum() < TOL
| [
"numpy.clip",
"numpy.testing.suppress_warnings",
"numpy.sqrt",
"numpy.testing.assert_equal",
"scipy.spatial._spherical_voronoi.SphericalVoronoi",
"numpy.array",
"numpy.einsum",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.sin",
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equ... | [((7800, 7841), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[8, 15, 21]'], {}), "('n', [8, 15, 21])\n", (7823, 7841), False, 'import pytest\n'), ((7847, 7893), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""radius"""', '[0.5, 1, 2]'], {}), "('radius', [0.5, 1, 2])\n", (7870, 7893), False, 'import pytest\n'), ((7899, 7956), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""center"""', '[(0, 0, 0), (1, 2, 3)]'], {}), "('center', [(0, 0, 0), (1, 2, 3)])\n", (7922, 7956), False, 'import pytest\n'), ((724, 1068), 'numpy.array', 'np.array', (['[[-0.78928481, -0.16341094, 0.59188373], [-0.66839141, 0.73309634, \n 0.12578818], [0.32535778, -0.92476944, -0.19734181], [-0.90177102, -\n 0.03785291, -0.43055335], [0.71781344, 0.68428936, 0.12842096], [-\n 0.96064876, 0.23492353, -0.14820556], [0.73181537, -0.22025898, -\n 0.6449281], [0.79979205, 0.54555747, 0.25039913]]'], {}), '([[-0.78928481, -0.16341094, 0.59188373], [-0.66839141, 0.73309634,\n 0.12578818], [0.32535778, -0.92476944, -0.19734181], [-0.90177102, -\n 0.03785291, -0.43055335], [0.71781344, 0.68428936, 0.12842096], [-\n 0.96064876, 0.23492353, -0.14820556], [0.73181537, -0.22025898, -\n 0.6449281], [0.79979205, 0.54555747, 0.25039913]])\n', (732, 1068), True, 'import numpy as np\n'), ((1212, 1388), 'numpy.array', 'np.array', (['[[0.88610999, -0.42383021, 0.18755541], [0.51980039, -0.72622668, 0.4498915\n ], [0.56540011, -0.81629197, -0.11827989], [0.69659682, -0.69972598, \n 0.15854467]]'], {}), '([[0.88610999, -0.42383021, 0.18755541], [0.51980039, -0.72622668, \n 0.4498915], [0.56540011, -0.81629197, -0.11827989], [0.69659682, -\n 0.69972598, 0.15854467]])\n', (1220, 1388), True, 'import numpy as np\n'), ((1465, 1510), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(10)'], {'endpoint': '(False)'}), '(0, 2 * np.pi, 10, endpoint=False)\n', (1476, 1510), True, 'import numpy as np\n'), ((1546, 1580), 
'numpy.linspace', 'np.linspace', (['(0.001)', '(np.pi * 0.4)', '(5)'], {}), '(0.001, np.pi * 0.4, 5)\n', (1557, 1580), True, 'import numpy as np\n'), ((1661, 1684), 'numpy.meshgrid', 'np.meshgrid', (['phi', 'theta'], {}), '(phi, theta)\n', (1672, 1684), True, 'import numpy as np\n'), ((1700, 1725), 'numpy.reshape', 'np.reshape', (['phiv', '(50, 1)'], {}), '(phiv, (50, 1))\n', (1710, 1725), True, 'import numpy as np\n'), ((1743, 1770), 'numpy.reshape', 'np.reshape', (['thetav', '(50, 1)'], {}), '(thetav, (50, 1))\n', (1753, 1770), True, 'import numpy as np\n'), ((1868, 1882), 'numpy.cos', 'np.cos', (['thetav'], {}), '(thetav)\n', (1874, 1882), True, 'import numpy as np\n'), ((1917, 1950), 'numpy.concatenate', 'np.concatenate', (['[x, y, z]'], {'axis': '(1)'}), '([x, y, z], axis=1)\n', (1931, 1950), True, 'import numpy as np\n'), ((2001, 2020), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2009, 2020), True, 'import numpy as np\n'), ((2053, 2082), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (2069, 2082), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((2247, 2293), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['(self.points * radius)', 'radius'], {}), '(self.points * radius, radius)\n', (2263, 2293), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((2307, 2360), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['(self.points + center)'], {'center': 'center'}), '(self.points + center, center=center)\n', (2323, 2360), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((2374, 2437), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['(self.points * radius + center)', 'radius', 'center'], {}), '(self.points * radius + center, radius, center)\n', (2390, 2437), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((2505, 2531), 'numpy.testing.assert_equal', 'assert_equal', (['s1.radius', '(1)'], {}), '(s1.radius, 
1)\n', (2517, 2531), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((2599, 2625), 'numpy.testing.assert_equal', 'assert_equal', (['s2.radius', '(2)'], {}), '(s2.radius, 2)\n', (2611, 2625), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((2634, 2671), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['s3.center', 'center'], {}), '(s3.center, center)\n', (2652, 2671), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((2680, 2706), 'numpy.testing.assert_equal', 'assert_equal', (['s3.radius', '(1)'], {}), '(s3.radius, 1)\n', (2692, 2706), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((2715, 2752), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['s4.center', 'center'], {}), '(s4.center, center)\n', (2733, 2752), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((2761, 2792), 'numpy.testing.assert_equal', 'assert_equal', (['s4.radius', 'radius'], {}), '(s4.radius, radius)\n', (2773, 2792), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((2874, 2903), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (2890, 2903), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((2921, 2940), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (2929, 2940), True, 'import numpy as np\n'), ((2965, 3018), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['(self.points + center)'], {'center': 'center'}), '(self.points + center, center=center)\n', 
(2981, 3018), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((3027, 3081), 'numpy.testing.assert_equal', 'assert_equal', (['sv_origin.regions', 'sv_translated.regions'], {}), '(sv_origin.regions, sv_translated.regions)\n', (3039, 3081), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((3090, 3168), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['(sv_origin.vertices + center)', 'sv_translated.vertices'], {}), '(sv_origin.vertices + center, sv_translated.vertices)\n', (3115, 3168), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((3278, 3307), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (3294, 3307), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((3328, 3364), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['(self.points * 2)', '(2)'], {}), '(self.points * 2, 2)\n', (3344, 3364), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((3373, 3421), 'numpy.testing.assert_equal', 'assert_equal', (['sv_unit.regions', 'sv_scaled.regions'], {}), '(sv_unit.regions, sv_scaled.regions)\n', (3385, 3421), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((3430, 3497), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['(sv_unit.vertices * 2)', 'sv_scaled.vertices'], {}), '(sv_unit.vertices * 2, sv_scaled.vertices)\n', (3455, 3497), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((3586, 3625), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {'radius': '(1)'}), '(self.points, radius=1)\n', (3602, 3625), False, 'from scipy.spatial import 
SphericalVoronoi, distance\n'), ((4057, 4086), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (4073, 4086), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((4571, 4600), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (4587, 4600), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((4707, 4743), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4725, 4743), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((4822, 4914), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0.5, 0.5, 0.5, 0.5]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0.5, 0.5,\n 0.5, 0.5]])\n', (4830, 4914), True, 'import numpy as np\n'), ((5529, 5558), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (5545, 5558), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((5653, 5683), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5665, 5683), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((5734, 5781), 'scipy.spatial._spherical_voronoi.SphericalVoronoi', 'spherical_voronoi.SphericalVoronoi', (['self.points'], {}), '(self.points)\n', (5768, 5781), True, 'from scipy.spatial import _spherical_voronoi as spherical_voronoi\n'), ((6257, 6299), 'numpy.concatenate', 'np.concatenate', (['(self.points, self.points)'], {}), '((self.points, self.points))\n', (6271, 6299), True, 'import numpy as np\n'), ((7630, 7663), 'numpy.array', 'np.array', (['[[-1, 0, 0], [1, 0, 0]]'], {}), '([[-1, 0, 0], [1, 0, 0]])\n', (7638, 7663), True, 'import numpy as np\n'), ((8084, 8128), 
'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n'], {'endpoint': '(False)'}), '(0, 2 * np.pi, n, endpoint=False)\n', (8095, 8128), True, 'import numpy as np\n'), ((8255, 8318), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['(points + center)'], {'radius': 'radius', 'center': 'center'}), '(points + center, radius=radius, center=center)\n', (8271, 8318), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((8494, 8514), 'numpy.array', 'np.array', (['sv.regions'], {}), '(sv.regions)\n', (8502, 8514), True, 'import numpy as np\n'), ((9617, 9646), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (9638, 9646), True, 'import numpy as np\n'), ((9759, 9783), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['points'], {}), '(points)\n', (9775, 9783), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((9947, 9973), 'numpy.sort', 'np.sort', (['sv._tri.simplices'], {}), '(sv._tri.simplices)\n', (9954, 9973), True, 'import numpy as np\n'), ((10799, 10823), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['points'], {}), '(points)\n', (10815, 10823), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((11142, 11179), 'scipy.spatial.distance.cdist', 'distance.cdist', (['sv.vertices', 'expected'], {}), '(sv.vertices, expected)\n', (11156, 11179), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((11194, 11221), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['dist'], {}), '(dist)\n', (11215, 11221), False, 'from scipy.optimize import linear_sum_assignment\n'), ((11689, 11713), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['points'], {}), '(points)\n', (11705, 11713), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((11900, 11937), 'scipy.spatial.distance.cdist', 'distance.cdist', (['sv.vertices', 'expected'], {}), '(sv.vertices, expected)\n', (11914, 11937), False, 'from scipy.spatial import 
SphericalVoronoi, distance\n'), ((11952, 11979), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['dist'], {}), '(dist)\n', (11973, 11979), False, 'from scipy.optimize import linear_sum_assignment\n'), ((1784, 1796), 'numpy.cos', 'np.cos', (['phiv'], {}), '(phiv)\n', (1790, 1796), True, 'import numpy as np\n'), ((1799, 1813), 'numpy.sin', 'np.sin', (['thetav'], {}), '(thetav)\n', (1805, 1813), True, 'import numpy as np\n'), ((1826, 1838), 'numpy.sin', 'np.sin', (['phiv'], {}), '(phiv)\n', (1832, 1838), True, 'import numpy as np\n'), ((1841, 1855), 'numpy.sin', 'np.sin', (['thetav'], {}), '(thetav)\n', (1847, 1855), True, 'import numpy as np\n'), ((2476, 2495), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2484, 2495), True, 'import numpy as np\n'), ((2570, 2589), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2578, 2589), True, 'import numpy as np\n'), ((3639, 3658), 'numpy.testing.suppress_warnings', 'suppress_warnings', ([], {}), '()\n', (3656, 3658), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((3749, 3784), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points', 'None'], {}), '(self.points, None)\n', (3765, 3784), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((3797, 3853), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['sv_unit.vertices', 'sv.vertices'], {}), '(sv_unit.vertices, sv.vertices)\n', (3822, 3853), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((3911, 3943), 'pytest.warns', 'assert_warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (3923, 3943), True, 'from pytest import warns as assert_warns\n'), ((3962, 3997), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['self.points', 'None'], {}), '(self.points, None)\n', (3978, 3997), 
False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((5032, 5083), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""three-dimensional"""'}), "(TypeError, match='three-dimensional')\n", (5045, 5083), False, 'import pytest\n'), ((5102, 5144), 'scipy.spatial._spherical_voronoi.SphericalVoronoi', 'spherical_voronoi.SphericalVoronoi', (['points'], {}), '(points)\n', (5136, 5144), True, 'from scipy.spatial import _spherical_voronoi as spherical_voronoi\n'), ((6313, 6338), 'pytest.raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (6326, 6338), True, 'from pytest import raises as assert_raises\n'), ((6357, 6408), 'scipy.spatial._spherical_voronoi.SphericalVoronoi', 'spherical_voronoi.SphericalVoronoi', (['self.degenerate'], {}), '(self.degenerate)\n', (6391, 6408), True, 'from scipy.spatial import _spherical_voronoi as spherical_voronoi\n'), ((6585, 6610), 'pytest.raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (6598, 6610), True, 'from pytest import raises as assert_raises\n'), ((6629, 6689), 'scipy.spatial._spherical_voronoi.SphericalVoronoi', 'spherical_voronoi.SphericalVoronoi', (['self.points'], {'radius': '(0.98)'}), '(self.points, radius=0.98)\n', (6663, 6689), True, 'from scipy.spatial import _spherical_voronoi as spherical_voronoi\n'), ((6918, 6943), 'pytest.raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (6931, 6943), True, 'from pytest import raises as assert_raises\n'), ((6962, 7029), 'scipy.spatial._spherical_voronoi.SphericalVoronoi', 'spherical_voronoi.SphericalVoronoi', (['self.points'], {'center': '[0.1, 0, 0]'}), '(self.points, center=[0.1, 0, 0])\n', (6996, 7029), True, 'from scipy.spatial import _spherical_voronoi as spherical_voronoi\n'), ((7268, 7292), 'scipy.spatial.SphericalVoronoi', 'SphericalVoronoi', (['points'], {}), '(points)\n', (7284, 7292), False, 'from scipy.spatial import SphericalVoronoi, distance\n'), ((7370, 7421), 'numpy.einsum', 'np.einsum', 
(['"""ij,ij->i"""', 'sv.vertices', 'triangles[:, 0]'], {}), "('ij,ij->i', sv.vertices, triangles[:, 0])\n", (7379, 7421), True, 'import numpy as np\n'), ((7677, 7732), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Rank of input points"""'}), "(ValueError, match='Rank of input points')\n", (7690, 7732), False, 'import pytest\n'), ((7751, 7793), 'scipy.spatial._spherical_voronoi.SphericalVoronoi', 'spherical_voronoi.SphericalVoronoi', (['points'], {}), '(points)\n', (7785, 7793), True, 'from scipy.spatial import _spherical_voronoi as spherical_voronoi\n'), ((8965, 8996), 'numpy.dot', 'np.dot', (['vertices[region]', 'point'], {}), '(vertices[region], point)\n', (8971, 8996), True, 'import numpy as np\n'), ((9185, 9244), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['arclengths[[1, 3]]', '(radius * np.pi / 2)'], {}), '(arclengths[[1, 3]], radius * np.pi / 2)\n', (9204, 9244), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((9322, 9381), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['arclengths[[0, 2]]', '(radius * np.pi / n)'], {}), '(arclengths[[0, 2]], radius * np.pi / n)\n', (9341, 9381), False, 'from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, suppress_warnings\n'), ((9700, 9730), 'numpy.linalg.norm', 'np.linalg.norm', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (9714, 9730), True, 'import numpy as np\n'), ((11055, 11067), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (11062, 11067), True, 'import numpy as np\n'), ((11663, 11675), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (11670, 11675), True, 'import numpy as np\n'), ((5867, 5885), 'numpy.array', 'np.array', (['[vertex]'], {}), '([vertex])\n', (5875, 5885), True, 'import numpy as np\n'), ((7458, 7478), 'numpy.clip', 'np.clip', (['dots', '(-1)', '(1)'], {}), '(dots, -1, 1)\n', (7465, 7478), 
True, 'import numpy as np\n'), ((7499, 7518), 'numpy.max', 'np.max', (['circumradii'], {}), '(circumradii)\n', (7505, 7518), True, 'import numpy as np\n'), ((8023, 8054), 'scipy.spatial.transform.Rotation.random', 'Rotation.random', ([], {'random_state': '(0)'}), '(random_state=0)\n', (8038, 8054), False, 'from scipy.spatial.transform import Rotation\n'), ((9031, 9064), 'numpy.cross', 'np.cross', (['vertices[region]', 'point'], {}), '(vertices[region], point)\n', (9039, 9064), True, 'import numpy as np\n'), ((9108, 9132), 'numpy.arctan2', 'np.arctan2', (['sine', 'cosine'], {}), '(sine, cosine)\n', (9118, 9132), True, 'import numpy as np\n'), ((10187, 10208), 'numpy.concatenate', 'np.concatenate', (['cells'], {}), '(cells)\n', (10201, 10208), True, 'import numpy as np\n'), ((10772, 10783), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (10778, 10783), True, 'import numpy as np\n'), ((10967, 11005), 'itertools.product', 'itertools.product', (['[-1, 1]'], {'repeat': 'dim'}), '([-1, 1], repeat=dim)\n', (10984, 11005), False, 'import itertools\n'), ((11579, 11617), 'itertools.product', 'itertools.product', (['[-1, 1]'], {'repeat': 'dim'}), '([-1, 1], repeat=dim)\n', (11596, 11617), False, 'import itertools\n'), ((11812, 11823), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (11818, 11823), True, 'import numpy as np\n'), ((8157, 8171), 'numpy.sin', 'np.sin', (['thetas'], {}), '(thetas)\n', (8163, 8171), True, 'import numpy as np\n'), ((8173, 8187), 'numpy.cos', 'np.cos', (['thetas'], {}), '(thetas)\n', (8179, 8187), True, 'import numpy as np\n'), ((8189, 8200), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8197, 8200), True, 'import numpy as np\n'), ((10759, 10770), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (10765, 10770), True, 'import numpy as np\n'), ((11799, 11810), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (11805, 11810), True, 'import numpy as np\n'), ((8850, 8873), 'numpy.dot', 'np.dot', (['points', 'poles.T'], {}), '(points, poles.T)\n', 
(8856, 8873), True, 'import numpy as np\n')] |
"""Calculate elasticity coefficients.
Functions to calculate elasticity coefficients for various community
quantities.
"""
from functools import partial
import pandas as pd
import numpy as np
from cobra.util import get_context
from micom.util import reset_min_community_growth
from micom.problems import regularize_l2_norm
from micom.solution import optimize_with_fraction
from rich.progress import track
STEP = 0.1
def _get_fluxes(sol, reactions):
"""Get the primal values for a set of variables."""
fluxes = {
r.id: sol.fluxes.loc[r.community_id, r.global_id] for r in reactions
}
return pd.Series(fluxes)
def _derivatives(before, after):
    """Get the elasticities for fluxes.

    Parameters
    ----------
    before : pandas.Series
        Fluxes in the reference state.
    after : pandas.Series
        Fluxes after perturbing a parameter by a log step of ``STEP``.

    Returns
    -------
    tuple of (pandas.Series, numpy.ndarray)
        The log-scale derivatives and, for each flux, its direction
        ("forward", "reverse" or "zero").

    Raises
    ------
    ValueError
        If any flux flipped sign between the two states, since
        elasticities on the flux magnitudes are meaningless then.
    """
    before_signs = np.sign(before)
    after_signs = np.sign(after)
    # A sign flip (+1 -> -1 or -1 -> +1) gives an absolute sign difference
    # of exactly 2. The previous check used `> 2`, which can never trigger,
    # and also forgot to `raise` the constructed exception.
    if any(np.abs(before_signs - after_signs) == 2):
        raise ValueError(
            "Some of the fluxes changed sign. " "Can't compute elasticities :("
        )
    direction = np.repeat("zero", len(before)).astype("<U8")
    direction[(before > 1e-6) | (after > 1e-6)] = "forward"
    direction[(before < -1e-6) | (after < -1e-6)] = "reverse"
    # Elasticity = d(log |flux|) / d(log parameter); 1e-6 avoids log(0).
    derivs = (np.log(after.abs() + 1e-6) - np.log(before.abs() + 1e-6)) / STEP
    return derivs, direction
def elasticities_by_medium(com, reactions, fraction, growth_rate, progress):
    """Get the elasticity coefficients of fluxes with respect to the medium.

    Perturbs each active import bound by a log step of ``STEP`` and records
    the resulting log-scale change of the response fluxes.

    Arguments
    ---------
    com : micom.Community
        The community for which to calculate elasticities.
    reactions : list of cobra.Reaction
        The reactions for which to calculate the elasticities (responses).
    fraction : double
        The tradeoff used in the cooperative tradeoff optimization.
    growth_rate : double or None
        A fixed community growth rate, or None to optimize it.
    progress : boolean
        Whether to show a progress bar over the perturbed metabolites.

    Returns
    -------
    pandas.Dataframe
        The long/tidy version of the elasticities. Contains columns reaction,
        taxon, effector, direction, and elasticity.
    """
    regularize_l2_norm(com, 0.0)
    # Reference (unperturbed) solution used as the baseline for derivatives.
    sol = optimize_with_fraction(com, fraction, growth_rate, True)
    before = _get_fluxes(sol, reactions)
    import_fluxes = pd.Series()
    dfs = []
    # Collect only the exchanges that currently act as imports; their sign
    # convention depends on whether the metabolite is a reactant or product.
    for ex in com.exchanges:
        export = len(ex.reactants) == 1
        flux = sol.fluxes.loc[ex.community_id, ex.global_id]
        if export and (flux < -1e-6):
            import_fluxes[ex] = flux
        elif not export and (flux > 1e-6):
            import_fluxes[ex] = -flux
        else:
            continue
    fluxes = import_fluxes.index
    if progress:
        fluxes = track(fluxes, description="Metabolites")
    for r in fluxes:
        flux = import_fluxes[r]
        # The `with com:` context rolls back the bound change after each
        # perturbed optimization.
        with com:
            if flux < -1e-6:
                r.lower_bound *= np.exp(STEP)
            else:
                r.upper_bound *= np.exp(STEP)
            sol = optimize_with_fraction(com, fraction, growth_rate, True)
        after = _get_fluxes(sol, reactions)
        deriv, dirs = _derivatives(before, after)
        res = pd.DataFrame(
            {
                "reaction": [rx.global_id for rx in reactions],
                "taxon": [list(r.compartments)[0] for r in reactions],
                "effector": r.id,
                "direction": dirs,
                "elasticity": deriv,
            }
        )
        dfs.append(res)
    return pd.concat(dfs)
def elasticities_by_abundance(com, reactions, fraction, growth_rate, progress):
    """Get the elasticity coefficients of fluxes with respect to abundances.

    Perturbs each taxon's abundance by a log step of ``STEP`` and records
    the resulting log-scale change of the response fluxes.

    Arguments
    ---------
    com : micom.Community
        The community for which to calculate elasticities.
    reactions : list of cobra.Reaction
        The reactions for which to calculate the elasticities (responses).
    fraction : double
        The tradeoff used in the cooperative tradeoff optimization.
    growth_rate : double or None
        A fixed community growth rate, or None to optimize it.
    progress : boolean
        Whether to show a progress bar over the taxa.

    Returns
    -------
    pandas.Dataframe
        The long/tidy version of the elasticities. Contains columns reaction,
        taxon, effector, direction, and elasticity.
    """
    regularize_l2_norm(com, 0.0)
    # Reference (unperturbed) solution used as the baseline for derivatives.
    sol = optimize_with_fraction(com, fraction, growth_rate, True)
    before = _get_fluxes(sol, reactions)
    dfs = []
    abundance = com.abundances.copy()
    taxa = abundance.index
    if progress:
        taxa = track(taxa, description="Taxa")
    for sp in taxa:
        # Perturb one taxon, re-optimize, then restore the old abundance.
        old = abundance[sp]
        abundance.loc[sp] *= np.exp(STEP)
        com.set_abundance(abundance, normalize=False)
        sol = optimize_with_fraction(com, fraction, growth_rate, True)
        after = _get_fluxes(sol, reactions)
        abundance.loc[sp] = old
        com.set_abundance(abundance, normalize=False)
        deriv, dirs = _derivatives(before, after)
        res = pd.DataFrame(
            {
                "reaction": [r.global_id for r in reactions],
                "taxon": [list(r.compartments)[0] for r in reactions],
                "effector": sp,
                "direction": dirs,
                "elasticity": deriv,
            }
        )
        dfs.append(res)
    return pd.concat(dfs)
def elasticities(com, fraction=0.5, reactions=None, progress=True):
    """Calculate elasticities for reactions.

    Calculates elasticity coefficients using the specified reactions as
    response and exchange bounds (diet) and taxa abundances as
    effectors/parameters. Will use an arbitrary flux distribution as base.

    Arguments
    ---------
    com : micom.Community
        The community for which to calculate elasticities.
    fraction : double
        The tradeoff to use for the cooperative tradeoff method. Fraction of
        maximal community growth to enforce.
    reactions : iterable
        A list of reactions to get elasticities for. Elements can either be
        reactions from the model, strings specifying the ids of reactions
        or ints specifying the indices of reactions. Defaults to using all
        reactions.
    progress : boolean
        Whether to show progress bars. Will show two, one for the diet
        optimizations and another one for the taxa abundances.

    Returns
    -------
    pandas.DataFrame
        A data frame with the following columns:
        "reaction" - the exchange reaction (response),
        "taxon" - the taxon the reaction is from,
        "effector" - the parameter that was changed,
        "direction" - whether the flux runs in the forward or reverse
        direction,
        "elasticity" - the elasticity coefficient,
        "type" - the type of effector either "exchange" for diet or "abundance"
        for taxa abundances.
    """
    # NOTE(review): growth_rate is fixed to None here; presumably
    # `optimize_with_fraction` derives it itself — confirm.
    growth_rate = None
    if reactions is None:
        reactions = com.reactions
    reactions = com.reactions.get_by_any(reactions)
    # All model modifications are rolled back when the context exits.
    with com:
        context = get_context(com)
        context(partial(reset_min_community_growth, com))
        by_medium = elasticities_by_medium(
            com, reactions, fraction, growth_rate, progress
        )
        by_medium["type"] = "exchanges"
        by_abundance = elasticities_by_abundance(
            com, reactions, fraction, growth_rate, progress
        )
        by_abundance["type"] = "abundance"
    both = pd.concat([by_medium, by_abundance]).reset_index(drop=True)
    # "m" is used as the taxon label for the medium compartment; rename it.
    both.loc[both.taxon == "m", "taxon"] = "medium"
    return both
| [
"pandas.Series",
"cobra.util.get_context",
"micom.problems.regularize_l2_norm",
"numpy.abs",
"numpy.exp",
"functools.partial",
"numpy.sign",
"micom.solution.optimize_with_fraction",
"pandas.concat",
"rich.progress.track"
] | [((620, 637), 'pandas.Series', 'pd.Series', (['fluxes'], {}), '(fluxes)\n', (629, 637), True, 'import pandas as pd\n'), ((735, 750), 'numpy.sign', 'np.sign', (['before'], {}), '(before)\n', (742, 750), True, 'import numpy as np\n'), ((769, 783), 'numpy.sign', 'np.sign', (['after'], {}), '(after)\n', (776, 783), True, 'import numpy as np\n'), ((1847, 1875), 'micom.problems.regularize_l2_norm', 'regularize_l2_norm', (['com', '(0.0)'], {}), '(com, 0.0)\n', (1865, 1875), False, 'from micom.problems import regularize_l2_norm\n'), ((1886, 1942), 'micom.solution.optimize_with_fraction', 'optimize_with_fraction', (['com', 'fraction', 'growth_rate', '(True)'], {}), '(com, fraction, growth_rate, True)\n', (1908, 1942), False, 'from micom.solution import optimize_with_fraction\n'), ((2004, 2015), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (2013, 2015), True, 'import pandas as pd\n'), ((3186, 3200), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (3195, 3200), True, 'import pandas as pd\n'), ((3813, 3841), 'micom.problems.regularize_l2_norm', 'regularize_l2_norm', (['com', '(0.0)'], {}), '(com, 0.0)\n', (3831, 3841), False, 'from micom.problems import regularize_l2_norm\n'), ((3852, 3908), 'micom.solution.optimize_with_fraction', 'optimize_with_fraction', (['com', 'fraction', 'growth_rate', '(True)'], {}), '(com, fraction, growth_rate, True)\n', (3874, 3908), False, 'from micom.solution import optimize_with_fraction\n'), ((4828, 4842), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (4837, 4842), True, 'import pandas as pd\n'), ((2419, 2459), 'rich.progress.track', 'track', (['fluxes'], {'description': '"""Metabolites"""'}), "(fluxes, description='Metabolites')\n", (2424, 2459), False, 'from rich.progress import track\n'), ((4062, 4093), 'rich.progress.track', 'track', (['taxa'], {'description': '"""Taxa"""'}), "(taxa, description='Taxa')\n", (4067, 4093), False, 'from rich.progress import track\n'), ((4171, 4183), 'numpy.exp', 'np.exp', (['STEP'], 
{}), '(STEP)\n', (4177, 4183), True, 'import numpy as np\n'), ((4252, 4308), 'micom.solution.optimize_with_fraction', 'optimize_with_fraction', (['com', 'fraction', 'growth_rate', '(True)'], {}), '(com, fraction, growth_rate, True)\n', (4274, 4308), False, 'from micom.solution import optimize_with_fraction\n'), ((6535, 6551), 'cobra.util.get_context', 'get_context', (['com'], {}), '(com)\n', (6546, 6551), False, 'from cobra.util import get_context\n'), ((795, 829), 'numpy.abs', 'np.abs', (['(before_signs - after_signs)'], {}), '(before_signs - after_signs)\n', (801, 829), True, 'import numpy as np\n'), ((2688, 2744), 'micom.solution.optimize_with_fraction', 'optimize_with_fraction', (['com', 'fraction', 'growth_rate', '(True)'], {}), '(com, fraction, growth_rate, True)\n', (2710, 2744), False, 'from micom.solution import optimize_with_fraction\n'), ((6568, 6608), 'functools.partial', 'partial', (['reset_min_community_growth', 'com'], {}), '(reset_min_community_growth, com)\n', (6575, 6608), False, 'from functools import partial\n'), ((6940, 6976), 'pandas.concat', 'pd.concat', (['[by_medium, by_abundance]'], {}), '([by_medium, by_abundance])\n', (6949, 6976), True, 'import pandas as pd\n'), ((2593, 2605), 'numpy.exp', 'np.exp', (['STEP'], {}), '(STEP)\n', (2599, 2605), True, 'import numpy as np\n'), ((2657, 2669), 'numpy.exp', 'np.exp', (['STEP'], {}), '(STEP)\n', (2663, 2669), True, 'import numpy as np\n')] |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
    {'dtype': numpy.float16},
    {'dtype': numpy.float32},
    {'dtype': numpy.float64},
)
class TestLogSumExp(unittest.TestCase):
    """Tests for ``functions.logsumexp`` over dtypes, axes, and devices.

    The forward result is compared against
    ``numpy.log(numpy.exp(x).sum(axis))``; first- and second-order gradients
    are verified numerically with ``gradient_check``.
    """

    def setUp(self):
        # Random input, upstream gradient (scalar), and second-order gradient.
        self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
        self.check_forward_option = {}
        self.check_backward_option = {
            'eps': 2.0 ** -5, 'rtol': 1e-4, 'atol': 1e-4}
        self.check_double_backward_option = {
            'eps': 2.0 ** -5, 'rtol': 1e-4, 'atol': 1e-4}
        if self.dtype == numpy.float16:
            # float16 needs a larger finite-difference eps and looser
            # tolerances.
            self.check_forward_option = {'rtol': 1e-2, 'atol': 1e-2}
            self.check_backward_option = {
                'eps': 2.0 ** -3, 'rtol': 1e-1, 'atol': 1e-1}
            self.check_double_backward_option = {
                'eps': 2.0 ** -3, 'rtol': 1e-1, 'atol': 1e-1}

    def check_forward(self, x_data, axis=None):
        x = chainer.Variable(x_data)
        y = functions.logsumexp(x, axis=axis)
        self.assertEqual(y.data.dtype, self.dtype)
        # Reference value computed directly in NumPy.
        y_expect = numpy.log(numpy.exp(self.x).sum(axis=axis))
        testing.assert_allclose(
            y_expect, y.data, **self.check_forward_option)

    def test_forward_cpu(self):
        self.check_forward(self.x)

    def test_forward_axis_cpu(self):
        for i in range(self.x.ndim):
            self.check_forward(self.x, axis=i)

    def test_forward_negative_axis_cpu(self):
        self.check_forward(self.x, axis=-1)

    def test_forward_multi_axis_cpu(self):
        self.check_forward(self.x, axis=(0, 1))

    def test_forward_multi_axis_invert_cpu(self):
        self.check_forward(self.x, axis=(1, 0))

    def test_forward_negative_multi_axis_cpu(self):
        self.check_forward(self.x, axis=(0, -1))

    def test_forward_negative_multi_axis_invert_cpu(self):
        self.check_forward(self.x, axis=(-2, 0))

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))

    @attr.gpu
    def test_forward_axis_gpu(self):
        for i in range(self.x.ndim):
            self.check_forward(cuda.to_gpu(self.x), axis=i)

    @attr.gpu
    def test_forward_negative_axis_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), axis=-1)

    @attr.gpu
    def test_forward_multi_axis_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), axis=(0, 1))

    @attr.gpu
    def test_forward_multi_axis_invert_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), axis=(1, 0))

    @attr.gpu
    def test_forward_negative_multi_axis_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), axis=(0, -1))

    @attr.gpu
    def test_forward_negative_multi_axis_invert_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), axis=(-2, 0))

    def check_backward(self, x_data, y_grad, axis=None):
        gradient_check.check_backward(
            lambda x: functions.logsumexp(x, axis), x_data, y_grad,
            **self.check_backward_option)

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    def test_backward_axis_cpu(self):
        for i in range(self.x.ndim):
            # Broadcast the scalar upstream gradient to the reduced shape.
            gy = numpy.ones_like(self.x.sum(axis=i)) * self.gy
            self.check_backward(self.x, gy, axis=i)

    def test_backward_negative_axis_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=-1)) * self.gy
        self.check_backward(self.x, gy, axis=-1)

    def test_backward_multi_axis_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, 1))) * self.gy
        self.check_backward(self.x, gy, axis=(0, 1))

    def test_backward_multi_axis_invert_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(1, 0))) * self.gy
        self.check_backward(self.x, gy, axis=(1, 0))

    def test_backward_negative_multi_axis_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, -1))) * self.gy
        self.check_backward(self.x, gy, axis=(0, -1))

    def test_backward_negative_multi_axis_invert_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(-2, 0))) * self.gy
        self.check_backward(self.x, gy, axis=(-2, 0))

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    def test_backward_axis_gpu(self):
        for i in range(self.x.ndim):
            gy = numpy.ones_like(self.x.sum(axis=i)) * self.gy
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=i)

    @attr.gpu
    def test_backward_negative_axis_gpu(self):
        # NOTE(review): this loops over ndim but always checks axis=-1;
        # likely a copy-paste from the axis variant — confirm intent.
        for i in range(self.x.ndim):
            gy = numpy.ones_like(self.x.sum(axis=-1)) * self.gy
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=-1)

    @attr.gpu
    def test_backward_multi_axis_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, 1))) * self.gy
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=(0, 1))

    @attr.gpu
    def test_backward_multi_axis_invert_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(1, 0))) * self.gy
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=(1, 0))

    @attr.gpu
    def test_backward_negative_multi_axis_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, -1))) * self.gy
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=(0, -1))

    @attr.gpu
    def test_backward_negative_multi_axis_invert_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(-2, 0))) * self.gy
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(gy), axis=(-2, 0))

    def check_double_backward(self, x_data, y_grad, x_grad_grad, axis=None):
        gradient_check.check_double_backward(
            lambda x: functions.logsumexp(x, axis), x_data, y_grad,
            x_grad_grad, dtype=numpy.float64,
            **self.check_double_backward_option)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.x, self.gy, self.ggx)

    def test_double_backward_axis_cpu(self):
        for i in range(self.x.ndim):
            gy = numpy.ones_like(self.x.sum(axis=i)) * self.gy
            self.check_double_backward(self.x, gy, self.ggx, axis=i)

    def test_double_backward_negative_axis_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=-1)) * self.gy
        self.check_double_backward(self.x, gy, self.ggx, axis=-1)

    def test_double_backward_multi_axis_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, 1))) * self.gy
        self.check_double_backward(self.x, gy, self.ggx, axis=(0, 1))

    def test_double_backward_multi_axis_invert_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(1, 0))) * self.gy
        self.check_double_backward(self.x, gy, self.ggx, axis=(1, 0))

    def test_double_backward_negative_multi_axis_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, -1))) * self.gy
        self.check_double_backward(self.x, gy, self.ggx, axis=(0, -1))

    def test_double_backward_negative_multi_axis_invert_cpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(-2, 0))) * self.gy
        self.check_double_backward(self.x, gy, self.ggx, axis=(-2, 0))

    @attr.gpu
    def test_double_backward_gpu(self):
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))

    @attr.gpu
    def test_double_backward_axis_gpu(self):
        for i in range(self.x.ndim):
            gy = numpy.ones_like(self.x.sum(axis=i)) * self.gy
            self.check_double_backward(
                cuda.to_gpu(self.x), cuda.to_gpu(gy), cuda.to_gpu(self.ggx),
                axis=i)

    @attr.gpu
    def test_double_backward_negative_axis_gpu(self):
        # NOTE(review): same ndim loop with fixed axis=-1 as in
        # test_backward_negative_axis_gpu — confirm intent.
        for i in range(self.x.ndim):
            gy = numpy.ones_like(self.x.sum(axis=-1)) * self.gy
            self.check_double_backward(
                cuda.to_gpu(self.x), cuda.to_gpu(gy), cuda.to_gpu(self.ggx),
                axis=-1)

    @attr.gpu
    def test_double_backward_multi_axis_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, 1))) * self.gy
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(gy), cuda.to_gpu(self.ggx),
            axis=(0, 1))

    @attr.gpu
    def test_double_backward_multi_axis_invert_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(1, 0))) * self.gy
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(gy), cuda.to_gpu(self.ggx),
            axis=(1, 0))

    @attr.gpu
    def test_double_backward_negative_multi_axis_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(0, -1))) * self.gy
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(gy), cuda.to_gpu(self.ggx),
            axis=(0, -1))

    @attr.gpu
    def test_double_backward_negative_multi_axis_invert_gpu(self):
        gy = numpy.ones_like(self.x.sum(axis=(-2, 0))) * self.gy
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(gy), cuda.to_gpu(self.ggx),
            axis=(-2, 0))

    def test_invalid_axis_type(self):
        with self.assertRaises(TypeError):
            functions.logsumexp(self.x, [0])

    def test_invalid_axis_type_in_tuple(self):
        with self.assertRaises(TypeError):
            functions.logsumexp(self.x, (1, 'x'))

    def test_duplicate_axis(self):
        with self.assertRaises(ValueError):
            functions.logsumexp(self.x, (0, 0))

    def test_pos_neg_duplicate_axis(self):
        with self.assertRaises(ValueError):
            functions.logsumexp(self.x, (1, -2))
# Collect and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| [
"chainer.testing.parameterize",
"chainer.Variable",
"chainer.testing.run_module",
"numpy.exp",
"chainer.functions.logsumexp",
"numpy.random.uniform",
"chainer.testing.assert_allclose",
"chainer.backends.cuda.to_gpu"
] | [((209, 312), 'chainer.testing.parameterize', 'testing.parameterize', (["{'dtype': numpy.float16}", "{'dtype': numpy.float32}", "{'dtype': numpy.float64}"], {}), "({'dtype': numpy.float16}, {'dtype': numpy.float32}, {\n 'dtype': numpy.float64})\n", (229, 312), False, 'from chainer import testing\n'), ((9794, 9832), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (9812, 9832), False, 'from chainer import testing\n'), ((1233, 1257), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (1249, 1257), False, 'import chainer\n'), ((1270, 1303), 'chainer.functions.logsumexp', 'functions.logsumexp', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (1289, 1303), False, 'from chainer import functions\n'), ((1426, 1496), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['y_expect', 'y.data'], {}), '(y_expect, y.data, **self.check_forward_option)\n', (1449, 1496), False, 'from chainer import testing\n'), ((2267, 2286), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2278, 2286), False, 'from chainer.backends import cuda\n'), ((2525, 2544), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2536, 2544), False, 'from chainer.backends import cuda\n'), ((2640, 2659), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2651, 2659), False, 'from chainer.backends import cuda\n'), ((2766, 2785), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2777, 2785), False, 'from chainer.backends import cuda\n'), ((2894, 2913), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2905, 2913), False, 'from chainer.backends import cuda\n'), ((3030, 3049), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (3041, 3049), False, 'from chainer.backends import cuda\n'), ((4459, 4478), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', 
(['self.x'], {}), '(self.x)\n', (4470, 4478), False, 'from chainer.backends import cuda\n'), ((4480, 4500), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (4491, 4500), False, 'from chainer.backends import cuda\n'), ((5126, 5145), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (5137, 5145), False, 'from chainer.backends import cuda\n'), ((5147, 5162), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (5158, 5162), False, 'from chainer.backends import cuda\n'), ((5335, 5354), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (5346, 5354), False, 'from chainer.backends import cuda\n'), ((5356, 5371), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (5367, 5371), False, 'from chainer.backends import cuda\n'), ((5547, 5566), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (5558, 5566), False, 'from chainer.backends import cuda\n'), ((5568, 5583), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (5579, 5583), False, 'from chainer.backends import cuda\n'), ((5767, 5786), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (5778, 5786), False, 'from chainer.backends import cuda\n'), ((5788, 5803), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (5799, 5803), False, 'from chainer.backends import cuda\n'), ((7488, 7507), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (7499, 7507), False, 'from chainer.backends import cuda\n'), ((7509, 7529), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (7520, 7529), False, 'from chainer.backends import cuda\n'), ((7531, 7552), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (7542, 7552), False, 'from chainer.backends import cuda\n'), ((8345, 8364), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', 
(['self.x'], {}), '(self.x)\n', (8356, 8364), False, 'from chainer.backends import cuda\n'), ((8366, 8381), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (8377, 8381), False, 'from chainer.backends import cuda\n'), ((8383, 8404), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (8394, 8404), False, 'from chainer.backends import cuda\n'), ((8616, 8635), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (8627, 8635), False, 'from chainer.backends import cuda\n'), ((8637, 8652), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (8648, 8652), False, 'from chainer.backends import cuda\n'), ((8654, 8675), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (8665, 8675), False, 'from chainer.backends import cuda\n'), ((8890, 8909), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (8901, 8909), False, 'from chainer.backends import cuda\n'), ((8911, 8926), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (8922, 8926), False, 'from chainer.backends import cuda\n'), ((8928, 8949), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (8939, 8949), False, 'from chainer.backends import cuda\n'), ((9172, 9191), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (9183, 9191), False, 'from chainer.backends import cuda\n'), ((9193, 9208), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (9204, 9208), False, 'from chainer.backends import cuda\n'), ((9210, 9231), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (9221, 9231), False, 'from chainer.backends import cuda\n'), ((9353, 9385), 'chainer.functions.logsumexp', 'functions.logsumexp', (['self.x', '[0]'], {}), '(self.x, [0])\n', (9372, 9385), False, 'from chainer import functions\n'), ((9489, 9526), 'chainer.functions.logsumexp', 
'functions.logsumexp', (['self.x', "(1, 'x')"], {}), "(self.x, (1, 'x'))\n", (9508, 9526), False, 'from chainer import functions\n'), ((9619, 9654), 'chainer.functions.logsumexp', 'functions.logsumexp', (['self.x', '(0, 0)'], {}), '(self.x, (0, 0))\n', (9638, 9654), False, 'from chainer import functions\n'), ((9755, 9791), 'chainer.functions.logsumexp', 'functions.logsumexp', (['self.x', '(1, -2)'], {}), '(self.x, (1, -2))\n', (9774, 9791), False, 'from chainer import functions\n'), ((402, 440), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2, 4)'], {}), '(-1, 1, (3, 2, 4))\n', (422, 440), False, 'import numpy\n'), ((478, 509), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '()'], {}), '(-1, 1, ())\n', (498, 509), False, 'import numpy\n'), ((548, 586), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2, 4)'], {}), '(-1, 1, (3, 2, 4))\n', (568, 586), False, 'import numpy\n'), ((2408, 2427), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2419, 2427), False, 'from chainer.backends import cuda\n'), ((3184, 3212), 'chainer.functions.logsumexp', 'functions.logsumexp', (['x', 'axis'], {}), '(x, axis)\n', (3203, 3212), False, 'from chainer import functions\n'), ((4687, 4706), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (4698, 4706), False, 'from chainer.backends import cuda\n'), ((4708, 4723), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (4719, 4723), False, 'from chainer.backends import cuda\n'), ((4928, 4947), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (4939, 4947), False, 'from chainer.backends import cuda\n'), ((4949, 4964), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (4960, 4964), False, 'from chainer.backends import cuda\n'), ((5965, 5993), 'chainer.functions.logsumexp', 'functions.logsumexp', (['x', 'axis'], {}), '(x, axis)\n', (5984, 5993), False, 
'from chainer import functions\n'), ((7770, 7789), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (7781, 7789), False, 'from chainer.backends import cuda\n'), ((7791, 7806), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (7802, 7806), False, 'from chainer.backends import cuda\n'), ((7808, 7829), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (7819, 7829), False, 'from chainer.backends import cuda\n'), ((8081, 8100), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (8092, 8100), False, 'from chainer.backends import cuda\n'), ((8102, 8117), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['gy'], {}), '(gy)\n', (8113, 8117), False, 'from chainer.backends import cuda\n'), ((8119, 8140), 'chainer.backends.cuda.to_gpu', 'cuda.to_gpu', (['self.ggx'], {}), '(self.ggx)\n', (8130, 8140), False, 'from chainer.backends import cuda\n'), ((1384, 1401), 'numpy.exp', 'numpy.exp', (['self.x'], {}), '(self.x)\n', (1393, 1401), False, 'import numpy\n')] |
"""
Copyright (c) 2020 CRISP
The abstract parent class for Convolutional Sparse Coder
:author: <NAME>
"""
from abc import ABCMeta, abstractmethod
import numpy as np
import sys
import os
PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
sys.path.append(PATH)
from src.helpers.convolution import code_sparse
from tqdm import tqdm
from dask import delayed
from dask import compute
import dask.bag as db
class BaseCSC(metaclass=ABCMeta):
    """Abstract parent class for convolutional sparse coders."""

    def __init__(self, dlen, error_tol, sparsity_tol, pflag):
        self.dlen = dlen                  # length of a dictionary element
        self.error_tol = error_tol        # reconstruction-error threshold
        self.sparsity_tol = sparsity_tol  # sparsity-level threshold
        self.pflag = pflag                # if truthy, run extractCode in parallel via Dask

    def set_error(self, error):
        self.error_tol = error

    def set_sparsity(self, sparsity):
        self.sparsity_tol = sparsity

    def computeNorm(self, delem, slen):
        """
        Compute norms of all possible timeshifts of a dictionary element.

        Inputs
        ======
        delem: array-like. dictionary element
        slen: int. signal length; the number of shifts is slen + len(delem) - 1

        Returns
        =======
        norms: 1-D array of 2-norms, one per shift. Shifts where the element
        lies fully inside the signal get norm 1 (element assumed normalized).
        """
        numOfsamples = delem.shape[0]
        clen = slen + numOfsamples - 1
        norms = np.zeros(clen)
        for idx in np.arange(clen):
            if idx<numOfsamples-1:
                # Element hangs off the left edge: only its tail overlaps.
                norms[idx] = np.linalg.norm(delem[-(idx+1):],2)
            elif idx>slen-1:
                # Element hangs off the right edge: only its head overlaps.
                dlen = numOfsamples-(idx-(slen-1))
                norms[idx] = np.linalg.norm(delem[:dlen],2)
            else:
                norms[idx] = 1
        return norms

    def extractCode(self, y_seg_set, d, indices, boundary=1):
        """
        Extract the sparse codes from the data.
        Intended to give more flexibility over various error/sparsity thresholds.

        Inputs
        ======
        y_seg_set:
            dict of segmented data keyed by segment id.
            The segments either can be equal or different lengths.
        d:
            dictionary matrix; its column count is the number of elements.
        indices:
            indicates which group the segment is associated with.
            sparsity level will be different for each segment; None uses the default.
        boundary:
            boundary-handling flag forwarded to the per-segment extractor.

        Returns
        =======
        coeffs: dict mapping segment key (or positional index, in the parallel
        branch) to the sparse code representation.
        """
        numOfelements = d.shape[1]
        coeffs = {}
        if self.pflag: # Parallel implementation via Dask
            # NOTE(review): extractCode_seg_eff is not declared on this class;
            # presumably subclasses provide it — confirm.
            output = []
            for k, y_seg in tqdm(y_seg_set.items()):
                if indices is None:
                    a = delayed(self.extractCode_seg_eff)(y_seg, d, sparsity=None, boundary=boundary)
                else:
                    a = delayed(self.extractCode_seg_eff)(y_seg, d, sparsity=indices[k], boundary=boundary)
                output.append(a)
            o = compute(*(output))
            # NOTE(review): keys here are positional indices, while the
            # sequential branch keys by the original segment keys.
            coeffs = {i:code_sparse(o[i], numOfelements) for i in np.arange(np.shape(o)[0])}
        else: # Sequential implementation
            for k, y_seg in tqdm(y_seg_set.items()):
                if indices is None:
                    c, _ = self.extractCode_seg(y_seg, d, sparsity=None, boundary=boundary)
                else:
                    c, _ = self.extractCode_seg(y_seg, d, sparsity=indices[k], boundary=boundary)
                sparse_c = code_sparse(c, numOfelements)
                coeffs[k] = sparse_c
        return coeffs

    @abstractmethod
    def extractCode_seg(self, y_seg, dictionary):
        # Subclasses implement per-segment sparse coding.
        pass
| [
"dask.delayed",
"src.helpers.convolution.code_sparse",
"dask.compute",
"numpy.zeros",
"numpy.linalg.norm",
"os.path.abspath",
"numpy.shape",
"sys.path.append",
"numpy.arange"
] | [((282, 303), 'sys.path.append', 'sys.path.append', (['PATH'], {}), '(PATH)\n', (297, 303), False, 'import sys\n'), ((241, 266), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n'), ((1050, 1064), 'numpy.zeros', 'np.zeros', (['clen'], {}), '(clen)\n', (1058, 1064), True, 'import numpy as np\n'), ((1079, 1094), 'numpy.arange', 'np.arange', (['clen'], {}), '(clen)\n', (1088, 1094), True, 'import numpy as np\n'), ((2179, 2195), 'dask.compute', 'compute', (['*output'], {}), '(*output)\n', (2186, 2195), False, 'from dask import compute\n'), ((1141, 1178), 'numpy.linalg.norm', 'np.linalg.norm', (['delem[-(idx + 1):]', '(2)'], {}), '(delem[-(idx + 1):], 2)\n', (1155, 1178), True, 'import numpy as np\n'), ((2214, 2246), 'src.helpers.convolution.code_sparse', 'code_sparse', (['o[i]', 'numOfelements'], {}), '(o[i], numOfelements)\n', (2225, 2246), False, 'from src.helpers.convolution import code_sparse\n'), ((2579, 2608), 'src.helpers.convolution.code_sparse', 'code_sparse', (['c', 'numOfelements'], {}), '(c, numOfelements)\n', (2590, 2608), False, 'from src.helpers.convolution import code_sparse\n'), ((1255, 1286), 'numpy.linalg.norm', 'np.linalg.norm', (['delem[:dlen]', '(2)'], {}), '(delem[:dlen], 2)\n', (1269, 1286), True, 'import numpy as np\n'), ((1966, 1999), 'dask.delayed', 'delayed', (['self.extractCode_seg_eff'], {}), '(self.extractCode_seg_eff)\n', (1973, 1999), False, 'from dask import delayed\n'), ((2065, 2098), 'dask.delayed', 'delayed', (['self.extractCode_seg_eff'], {}), '(self.extractCode_seg_eff)\n', (2072, 2098), False, 'from dask import delayed\n'), ((2266, 2277), 'numpy.shape', 'np.shape', (['o'], {}), '(o)\n', (2274, 2277), True, 'import numpy as np\n')] |
# Copyright (C) 2019-2022, <NAME>.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
"""
Transformation for semantic segmentation
"""
import random
import numpy as np
import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F
from torchvision.transforms import transforms
def pad_if_smaller(img, size, fill=0):
    """Pad `img` on the right and bottom so both sides reach at least `size`.

    Images that are already large enough are returned unchanged.
    """
    if min(img.size) >= size:
        return img
    ow, oh = img.size
    pad_right = max(size - ow, 0)
    pad_bottom = max(size - oh, 0)
    return F.pad(img, (0, 0, pad_right, pad_bottom), fill=fill)
class Compose(transforms.Compose):
    """A `Compose` variant whose transforms take and return (image, target) pairs."""

    def __init__(self, transforms):
        super(Compose, self).__init__(transforms)

    def __call__(self, image, target):
        # Thread the pair through every transform in order.
        for step in self.transforms:
            image, target = step(image, target)
        return image, target
class Resize(object):
    """Resize image and segmentation target to `output_size`.

    The image uses the configured interpolation; the target always uses
    nearest-neighbor so label values are not blended.
    """

    def __init__(self, output_size, interpolation=InterpolationMode.BILINEAR):
        self.output_size = output_size
        self.interpolation = interpolation

    def __call__(self, image, target):
        resized_image = F.resize(image, self.output_size, interpolation=self.interpolation)
        resized_target = F.resize(target, self.output_size, interpolation=InterpolationMode.NEAREST)
        return resized_image, resized_target

    def __repr__(self):
        return f"{self.__class__.__name__}(output_size={self.output_size})"
class RandomResize(object):
    """Resize image and target to a size drawn uniformly from [min_size, max_size]."""

    def __init__(self, min_size, max_size=None, interpolation=InterpolationMode.BILINEAR):
        self.min_size = min_size
        # A missing max collapses the range to a fixed size.
        self.max_size = min_size if max_size is None else max_size
        self.interpolation = interpolation

    def __call__(self, image, target):
        size = self.min_size
        if self.min_size != self.max_size:
            size = random.randint(self.min_size, self.max_size)
        # Nearest-neighbor on the target keeps label values intact.
        image = F.resize(image, size, interpolation=self.interpolation)
        target = F.resize(target, size, interpolation=InterpolationMode.NEAREST)
        return image, target

    def __repr__(self):
        return f"{self.__class__.__name__}(min_size={self.min_size}, max_size={self.max_size})"
class RandomHorizontalFlip(object):
    """Horizontally flip image and target together with probability `prob`."""

    def __init__(self, prob):
        self.prob = prob

    def __call__(self, image, target):
        if random.random() >= self.prob:
            return image, target
        # Flip both so the segmentation stays aligned with the image.
        return F.hflip(image), F.hflip(target)

    def __repr__(self):
        return f"{self.__class__.__name__}(p={self.prob})"
class RandomCrop(object):
    """Crop a random `size` x `size` window from image and target together."""

    def __init__(self, size):
        self.size = size

    def __call__(self, image, target):
        # Guarantee both inputs are at least `size` in each dimension; the
        # target is padded with 255 (presumably the ignore index — confirm).
        image = pad_if_smaller(image, self.size)
        target = pad_if_smaller(target, self.size, fill=255)
        # A single set of crop parameters keeps image and target aligned.
        params = transforms.RandomCrop.get_params(image, (self.size, self.size))
        return F.crop(image, *params), F.crop(target, *params)

    def __repr__(self):
        return f"{self.__class__.__name__}(size={self.size})"
class ToTensor(transforms.ToTensor):
    """Convert the image via the standard ToTensor and the target to int64."""

    def __call__(self, img, target):
        tensor_img = super(ToTensor, self).__call__(img)
        # Labels stay integral — no scaling, just a dtype-stable tensor.
        mask = torch.as_tensor(np.array(target), dtype=torch.int64)
        return tensor_img, mask
class ImageTransform(object):
    """Adapt an image-only transform to the (image, target) interface."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, image, target):
        # The wrapped transform only touches the image; the target passes
        # through untouched.
        return self.transform.__call__(image), target

    def __repr__(self):
        return self.transform.__repr__()
| [
"torchvision.transforms.functional.hflip",
"torchvision.transforms.functional.crop",
"torchvision.transforms.transforms.RandomCrop.get_params",
"torchvision.transforms.functional.pad",
"numpy.array",
"torchvision.transforms.functional.resize",
"random.random",
"random.randint"
] | [((670, 711), 'torchvision.transforms.functional.pad', 'F.pad', (['img', '(0, 0, padw, padh)'], {'fill': 'fill'}), '(img, (0, 0, padw, padh), fill=fill)\n', (675, 711), True, 'from torchvision.transforms import functional as F\n'), ((1239, 1306), 'torchvision.transforms.functional.resize', 'F.resize', (['image', 'self.output_size'], {'interpolation': 'self.interpolation'}), '(image, self.output_size, interpolation=self.interpolation)\n', (1247, 1306), True, 'from torchvision.transforms import functional as F\n'), ((1324, 1399), 'torchvision.transforms.functional.resize', 'F.resize', (['target', 'self.output_size'], {'interpolation': 'InterpolationMode.NEAREST'}), '(target, self.output_size, interpolation=InterpolationMode.NEAREST)\n', (1332, 1399), True, 'from torchvision.transforms import functional as F\n'), ((2031, 2086), 'torchvision.transforms.functional.resize', 'F.resize', (['image', 'size'], {'interpolation': 'self.interpolation'}), '(image, size, interpolation=self.interpolation)\n', (2039, 2086), True, 'from torchvision.transforms import functional as F\n'), ((2104, 2167), 'torchvision.transforms.functional.resize', 'F.resize', (['target', 'size'], {'interpolation': 'InterpolationMode.NEAREST'}), '(target, size, interpolation=InterpolationMode.NEAREST)\n', (2112, 2167), True, 'from torchvision.transforms import functional as F\n'), ((2968, 3031), 'torchvision.transforms.transforms.RandomCrop.get_params', 'transforms.RandomCrop.get_params', (['image', '(self.size, self.size)'], {}), '(image, (self.size, self.size))\n', (3000, 3031), False, 'from torchvision.transforms import transforms\n'), ((3048, 3075), 'torchvision.transforms.functional.crop', 'F.crop', (['image', '*crop_params'], {}), '(image, *crop_params)\n', (3054, 3075), True, 'from torchvision.transforms import functional as F\n'), ((3093, 3121), 'torchvision.transforms.functional.crop', 'F.crop', (['target', '*crop_params'], {}), '(target, *crop_params)\n', (3099, 3121), True, 'from 
torchvision.transforms import functional as F\n'), ((1970, 2014), 'random.randint', 'random.randint', (['self.min_size', 'self.max_size'], {}), '(self.min_size, self.max_size)\n', (1984, 2014), False, 'import random\n'), ((2462, 2477), 'random.random', 'random.random', ([], {}), '()\n', (2475, 2477), False, 'import random\n'), ((2511, 2525), 'torchvision.transforms.functional.hflip', 'F.hflip', (['image'], {}), '(image)\n', (2518, 2525), True, 'from torchvision.transforms import functional as F\n'), ((2583, 2598), 'torchvision.transforms.functional.hflip', 'F.hflip', (['target'], {}), '(target)\n', (2590, 2598), True, 'from torchvision.transforms import functional as F\n'), ((3398, 3414), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (3406, 3414), True, 'import numpy as np\n')] |
import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys
from numpy import newaxis
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
from nltk.stem.wordnet import WordNetLemmatizer
from tensorflow.python.layers.core import Dense
from nltk.corpus import stopwords
from multiprocessing import Pool
from collections import Counter
from pprint import pprint
from keras.models import Model
from keras.layers import *
from keras.optimizers import *
from keras.models import model_from_json
from keras.models import load_model
from keras.callbacks import *
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import SmoothingFunction
from copy import deepcopy
#eager.enable_eager_execution()
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
train_original = "train.en"
train_translated = "train.vi"
test_original = "tst2013.en"
test_translated = "tst2013.vi"
word_number_mapping_file = "word_mappings.txt"
processed_original = "translated_numeric.txt"
processed_translated = "original_numeric.txt"
modelDir = './model/'
modelFileName = 'Eldin_Sahbaz_Model.ckpt'
#this commented filtering code splits the code at the character level rather than the word level
'''
def filter_symbols(original_input, translated_input):
try:
zero = lambda text: contractions.fix(text.lower())
one = lambda text: re.sub('\.\.\.', ' ', zero(text))
two = lambda text: list(one(text)) #[character for character in list(one(text)) if (character not in ['-', '\\', '/', '.', '—', '…', '...', '?', ',', '<', '>', '\"', ';', ':', '[', ']', '{', '}', '|', '=', '+', '_', '*', '&', '^', '%', '$', '#', '@', '!', '`', '~'])]
return (two(original_input), two(translated_input))
except:
return None
def filter_symbols_test(input_text):
try:
zero = lambda text: contractions.fix(text.lower())
one = lambda text: re.sub('\.\.\.', ' ', zero(text))
two = lambda text: list(one(text)) #[character for character in list(one(text)) if (character not in ['-', '\\', '/', '.', '—', '…', '...', '?', ',', '<', '>', '\"', ';', ':', '[', ']', '{', '}', '|', '=', '+', '_', '*', '&', '^', '%', '$', '#', '@', '!', '`', '~'])]
return two(input_text)
except:
return None
'''
def filter_symbols(original_input, translated_input):
    """Clean one (source, target) sentence pair for training.

    Lower-cases and expands contractions, collapses ellipses to '.',
    word-tokenizes, then strips punctuation and lemmatizes each token.
    English stop words are removed from the source sentence only (the
    target keeps them so the decoder learns complete sentences).

    Returns
    -------
    tuple of (source_tokens, target_tokens) word lists, or None when
    either sentence cannot be processed (callers skip None pairs).
    """
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    # translation table that deletes every punctuation character
    table = str.maketrans({key: None for key in string.punctuation})

    def tokenize(text):
        # normalize -> expand contractions -> collapse '...' -> word tokens
        normalized = re.sub(r'\.\.\.', '.', contractions.fix(text.lower()))
        return nltk.word_tokenize(normalized)

    try:
        original_tokens = [lemmatizer.lemmatize(word.translate(table))
                           for word in tokenize(original_input)
                           if word not in stop_words]
        translated_tokens = [lemmatizer.lemmatize(word.translate(table))
                             for word in tokenize(translated_input)]
        # drop tokens that became empty after punctuation stripping
        return (list(filter(None, original_tokens)),
                list(filter(None, translated_tokens)))
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        return None
def filter_symbols_test(input_text):
    """Clean a single raw sentence for inference.

    Same pipeline as the source side of filter_symbols(): lower-case,
    expand contractions, collapse ellipses, word-tokenize, drop English
    stop words, strip punctuation and lemmatize; tokens that become
    empty after punctuation stripping are discarded.

    Returns the list of cleaned tokens, or None if processing fails.
    """
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    # translation table that deletes every punctuation character
    table = str.maketrans({key: None for key in string.punctuation})
    try:
        normalized = re.sub(r'\.\.\.', '.', contractions.fix(input_text.lower()))
        tokens = [lemmatizer.lemmatize(word.translate(table))
                  for word in nltk.word_tokenize(normalized)
                  if word not in stop_words]
        # drop tokens that became empty after punctuation stripping
        return list(filter(None, tokens))
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        return None
def clean_data(original, translated):
    """Load parallel corpora from the *original* and *translated* file
    paths, clean every sentence pair in parallel, and return aligned
    token lists.

    Side effect: the raw per-pair results (including failed pairs) are
    pickled to "filtered_data".

    Returns
    -------
    (original_sentences, translated_sentences): two equally long lists
    of token lists; failed or empty pairs are dropped from BOTH sides.
    """
    with open(original, 'r', encoding="utf8") as file:
        original_data = file.read().split('\n')
    with open(translated, 'r', encoding="utf8") as file:
        translated_data = file.read().split('\n')
    data = list(zip(original_data, translated_data))
    # clean pairs in parallel; filter_symbols returns None on failure
    pool = Pool()
    try:
        cleaned = pool.starmap(filter_symbols, data)
    finally:
        # ensure workers are reaped even if starmap raises
        pool.close()
        pool.join()
    with open("filtered_data", 'wb') as file: pickle.dump(cleaned, file)
    # BUG FIX: drop failed (None) and empty results as *pairs*. The old code
    # did zip(*cleaned) first — which raises TypeError as soon as any pair is
    # None — and then filtered each side independently, which could leave the
    # two lists misaligned when only one side of a pair was empty.
    kept = [pair for pair in cleaned if pair is not None and pair[0] and pair[1]]
    if not kept:
        return ([], [])
    original_text, translated_text = (list(side) for side in zip(*kept))
    return (original_text, translated_text)
def convert_text(original_text, translated_text, cutoff):
    """Build word<->id vocabularies from the cleaned training corpora and
    return the corpora as fixed-length integer matrices.

    Parameters
    ----------
    original_text, translated_text : lists of token lists (already cleaned).
    cutoff : minimum corpus frequency for a word to enter the vocabulary;
        rarer words are encoded as <UNK>.

    Returns a 9-tuple:
    (original_DNS, translated_DNS, encoder_matrix, decoder_matrix,
     max_original_length, max_translated_length, min_length,
     unk_original_limit, unk_translated_limit)
    where each *_DNS is {'forward': word->id, 'backward': id->word}.
    """
    # ids 0-3 are reserved for the special tokens; real words are appended after
    original_DNS = {'forward':{'<PAD>':0, '<UNK>':1, '<EOS>':2, '<GO>':3}, 'backward':{0:'<PAD>', 1:'<UNK>', 2:'<EOS>', 3:'<GO>'}}
    translated_DNS = {'forward': {'<PAD>': 0, '<UNK>': 1, '<EOS>': 2, '<GO>': 3}, 'backward': {0: '<PAD>', 1: '<UNK>', 2: '<EOS>', 3: '<GO>'}}
    original_words = list()
    translated_words = list()
    stop_words = set(stopwords.words('english'))
    converted_original, converted_translated = list(), list()
    #aggregate all the words into a list
    for sentence in original_text: original_words.extend(sentence)
    for sentence in translated_text: translated_words.extend(sentence)
    # keep only words at/above the frequency cutoff; the source side also drops
    # stop words (mirrors the filtering in filter_symbols)
    original_word_frequencies = [x for x in sorted(Counter(original_words).items(), key=lambda x: x[1], reverse=True) if ((x[1] >= cutoff) and (x[0] not in stop_words))]
    translated_word_frequencies = [x for x in sorted(Counter(translated_words).items(), key=lambda x: x[1], reverse=True) if (x[1] >= cutoff)]
    # create mapping for word -> int and for int -> word for the first language
    if original_word_frequencies:
        words, freqs = list(zip(*original_word_frequencies))
        original_DNS['forward'].update(dict(zip(words, list(range(len(original_DNS['forward']), len(words)+len(original_DNS['forward']))))))
        original_DNS['backward'].update({v: k for k, v in original_DNS['forward'].items()})
    # create mapping for word -> int and for int -> word for the second language
    if translated_word_frequencies:
        words, freqs = list(zip(*translated_word_frequencies))
        translated_DNS['forward'].update(dict(zip(words, list(range(len(translated_DNS['forward']), len(words)+len(translated_DNS['forward']))))))
        translated_DNS['backward'].update({v: k for k, v in translated_DNS['forward'].items()})
    #Compute the translation to int for the full text
    # each sentence becomes <GO> w1 w2 ... <EOS>, with OOV words -> <UNK>
    for sentence in original_text:
        temp_sentence = list()
        temp_sentence.append(original_DNS['forward']['<GO>'])
        for word in sentence:
            try: temp_sentence.append(original_DNS['forward'][word])
            except : temp_sentence.append(original_DNS['forward']['<UNK>'])
        temp_sentence.append(original_DNS['forward']['<EOS>'])
        converted_original.append(temp_sentence)
    for sentence in translated_text:
        temp_sentence = list()
        temp_sentence.append(translated_DNS['forward']['<GO>'])
        for word in sentence:
            try: temp_sentence.append(translated_DNS['forward'][word])
            except : temp_sentence.append(translated_DNS['forward']['<UNK>'])
        temp_sentence.append(translated_DNS['forward']['<EOS>'])
        converted_translated.append(temp_sentence)
    #These lines of code get some statistics about the dataset
    original_text_lengths, translated_text_lengths, original_unk_counts, translated_unk_counts = list(), list(), list(), list()
    #90th percentile of original text lengths
    for sentence in converted_original: original_text_lengths.append(len(sentence))
    original_text_pd = pd.DataFrame(original_text_lengths, columns=['counts'])
    max_original_length = int(np.percentile(original_text_pd.counts, 90))
    #90th percentile of translated text lengths
    for sentence in converted_translated: translated_text_lengths.append(len(sentence))
    translated_text_pd = pd.DataFrame(translated_text_lengths, columns=['counts'])
    max_translated_length = int(np.percentile(translated_text_pd.counts, 90))
    #5th percentile for minimum text length
    data_pd = pd.DataFrame(original_text_lengths + translated_text_lengths, columns=['counts'])
    min_length = int(np.percentile(data_pd.counts, 5))
    #5th percentile for minimum unknown token limit in original text
    for sentence in converted_original: original_unk_counts.append(Counter(sentence)[original_DNS['forward']['<UNK>']])
    original_pd = pd.DataFrame(original_unk_counts, columns=['counts'])
    unk_original_limit = int(np.percentile(original_pd.counts, 5))
    #5th percentile for minimum unknown token limit in translated text
    # NOTE(review): using the 5th percentile of <UNK> counts as an upper bound
    # is very strict (often 0) and may discard most of the corpus — confirm.
    for sentence in converted_translated: translated_unk_counts.append(Counter(sentence)[translated_DNS['forward']['<UNK>']])
    translated_pd = pd.DataFrame(translated_unk_counts, columns=['counts'])
    unk_translated_limit = int(np.percentile(translated_pd.counts, 5))
    #truncate all the text and pad them with 0s
    truncated_original_text, truncated_translated_text = list(), list()
    #padding here is done in the front because the string is reversed
    # (the encoder consumes the source sentence reversed, so <PAD> goes first)
    for sentence in converted_original:
        temp = sentence[:max_original_length]
        temp[-1] = original_DNS['forward']['<EOS>']
        temp = list(reversed(temp))
        if len(temp) < max_original_length: temp[0:0] = [original_DNS['forward']['<PAD>']]*(max_original_length-len(temp))
        truncated_original_text.append(temp)
    #padding here is done at the end
    for sentence in converted_translated:
        temp = sentence[:max_translated_length]
        temp[-1] = translated_DNS['forward']['<EOS>']
        if len(temp) < max_translated_length: temp[len(temp):len(temp)] = [translated_DNS['forward']['<PAD>']]*(max_translated_length-len(temp))
        truncated_translated_text.append(temp)
    #remove samples that have too many unknown tokens
    # a pair is kept only if BOTH sides are long enough and within UNK limits
    cleaned_truncated_original, cleaned_truncated_translated = list(), list()
    for original, translated in list(zip(truncated_original_text, truncated_translated_text)):
        original_count, translated_count = Counter(original), Counter(translated)
        if ((original_count[original_DNS['forward']['<UNK>']] <= unk_original_limit) and (translated_count[translated_DNS['forward']['<UNK>']] <= unk_translated_limit) and (len(original) >= min_length) and (len(translated) >= min_length)):
            cleaned_truncated_original.append(original)
            cleaned_truncated_translated.append(translated)
    return (original_DNS, translated_DNS, np.array(cleaned_truncated_original), np.array(cleaned_truncated_translated), max_original_length, max_translated_length, min_length, unk_original_limit, unk_translated_limit)
def convert_text_test(original_text, translated_text, original_DNS, translated_DNS, max_original_length, max_translated_length):
    """Numericize, truncate and pad pre-cleaned test sentences.

    Each sentence is wrapped in <GO> ... <EOS> ids, with out-of-vocabulary
    words mapped to <UNK>. Source sequences are truncated to
    max_original_length, reversed, and front-padded with <PAD>; target
    sequences are truncated to max_translated_length and back-padded.

    Returns (source_matrix, target_matrix) as numpy arrays.
    """
    def encode(sentences, dns):
        # words -> ids with <GO>/<EOS> wrappers; OOV -> <UNK>
        mapping = dns['forward']
        unk = mapping['<UNK>']
        encoded = []
        for words in sentences:
            ids = [mapping['<GO>']]
            ids.extend(mapping.get(word, unk) for word in words)
            ids.append(mapping['<EOS>'])
            encoded.append(ids)
        return encoded

    source_rows, target_rows = [], []
    for ids in encode(original_text, original_DNS):
        row = ids[:max_original_length]
        row[-1] = original_DNS['forward']['<EOS>']   # truncation must still end in <EOS>
        row.reverse()                                # encoder reads the source reversed
        pad = max_original_length - len(row)
        if pad > 0:
            row[0:0] = [original_DNS['forward']['<PAD>']] * pad
        source_rows.append(row)
    for ids in encode(translated_text, translated_DNS):
        row = ids[:max_translated_length]
        row[-1] = translated_DNS['forward']['<EOS>']
        pad = max_translated_length - len(row)
        if pad > 0:
            row.extend([translated_DNS['forward']['<PAD>']] * pad)
        target_rows.append(row)
    return (np.array(source_rows), np.array(target_rows))
def build_model(num_encoder_tokens, num_decoder_tokens, original_vocab_length, translated_vocab_length, embed_size, nodes, batch_size):
    """Build the TF1 seq2seq graph and return its handles.

    A 1-layer bidirectional LSTM encoder feeds its concatenated final
    state into a 2-layer LSTM decoder; a dense projection over the target
    vocabulary produces logits trained with sequence loss + RMSProp.

    NOTE(review): num_encoder_tokens and num_decoder_tokens are unused.

    Returns the tuple (optimizer, loss, logits, inputs, outputs, targets,
    keep_rate, input_embedding, output_embedding, encoder_embedding,
    decoder_embedding, decoder_size) of graph ops/placeholders.
    """
    num_encoder_layers = 1
    num_decoder_layers = 2
    #inputs place holders
    # token-id matrices; keep_rate/decoder_size are length-1 vectors fed at run time
    inputs = tf.placeholder(tf.int32, (None, None), 'inputs') # num_encoder_tokens
    outputs = tf.placeholder(tf.int32, (None, None), 'output')
    targets = tf.placeholder(tf.int32, (None, None), 'targets')
    keep_rate = tf.placeholder(tf.float32, (1), 'keep_rate')
    decoder_size = tf.placeholder(tf.int32, (1), 'decoder_size')
    #embedding variables for the encoder and decoder inputs
    input_embedding = tf.Variable(tf.random_uniform((original_vocab_length, embed_size), -1.0, 1.0), name='enc_embedding')
    output_embedding = tf.Variable(tf.random_uniform((translated_vocab_length, embed_size), -1.0, 1.0), name='dec_embedding')
    encoder_embedding = tf.nn.embedding_lookup(input_embedding, inputs)
    decoder_embedding = tf.nn.embedding_lookup(output_embedding, outputs)
    #bidirectional encoder LSTM with dropout
    prev_input = encoder_embedding
    last_state = None
    for i in range(num_encoder_layers):
        enc_fw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(nodes), output_keep_prob=keep_rate[0], state_keep_prob=keep_rate[0])
        enc_bw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(nodes), output_keep_prob=keep_rate[0], state_keep_prob=keep_rate[0])
        ((forward_output, backward_output), (forward_state, backward_state)) = tf.nn.bidirectional_dynamic_rnn(cell_fw=enc_fw_cell, cell_bw=enc_bw_cell, inputs=prev_input, dtype=tf.float32, scope="encoder_rnn{0}".format(str(i)))
        # fw/bw final states are concatenated feature-wise -> decoder cells use 2*nodes
        last_state = tf.contrib.rnn.LSTMStateTuple(c=tf.concat((forward_state.c, backward_state.c), 1), h=tf.concat((forward_state.h, backward_state.h), 1))
        # NOTE(review): for batch-major outputs axis 1 is the time axis; bidirectional
        # outputs are conventionally concatenated on the feature axis (2) — confirm.
        prev_input = tf.concat((forward_output, backward_output), 1)
    #decoder LSTM with dropout
    prev_input = decoder_embedding
    for i in range(num_decoder_layers):
        lstm_dec = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(2 * nodes), output_keep_prob=keep_rate[0], state_keep_prob=keep_rate[0])
        # both decoder layers are initialized from the (same) encoder final state
        prev_input, _ = tf.nn.dynamic_rnn(lstm_dec, inputs=prev_input, initial_state=last_state, scope='decoder_rnn{0}'.format(str(i)))
    #Dense layers used as Bag of Words
    # per-timestep projection onto the target vocabulary
    logits = tf.layers.dense(prev_input, units=translated_vocab_length, use_bias=True)
    #Get the loss and optimize using RMSProp
    # uniform weights over (decoder_size - 1) steps: targets are outputs shifted by one
    loss = tf.contrib.seq2seq.sequence_loss(logits, targets, tf.ones([batch_size, (decoder_size[0] - 1)]))
    optimizer = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
    return (optimizer, loss, logits, inputs, outputs, targets, keep_rate, input_embedding, output_embedding, encoder_embedding, decoder_embedding, decoder_size)
def train_and_save(encoder_input_data, decoder_input_data, optimizer, loss, logits, keep_rate, epochs, batch_size, inputs, outputs, targets, decoder_size, session, modelDir, modelFileName, saver):
    """Run the training loop over the prepared graph and checkpoint it.

    For each of 100 iterations x `epochs` epochs, samples one random batch,
    trims its padding, runs one optimizer step (dropout keep rate 0.8), and
    prints loss/accuracy. Every 10th iteration the model is checkpointed to
    modelDir (both an iteration-tagged file and the "latest" file).
    """
    session.run(tf.global_variables_initializer())
    iterations = 100
    #training for some specified number of iterations
    for iteration_i in range(iterations):
        print("iteration: {0}".format(str(iteration_i)))
        #each iteration has some specified number of epochs
        for epoch_i in range(epochs):
            #randomly sample batches from the data
            # sampling with replacement across the whole training set
            batch_idx = np.random.choice(np.arange(encoder_input_data.shape[0]), size=batch_size)
            batch_x, batch_y = encoder_input_data[batch_idx, :], decoder_input_data[batch_idx,]
            #iterate over the batches
            for batch_i, (source_batch, target_batch) in enumerate([(batch_x, batch_y)]):
                #removes extra padding in each batch
                # 2 is the <EOS> id; sources are reversed+front-padded, so slice from
                # the earliest <EOS>; targets keep everything up to the latest <EOS>
                source_batch = (source_batch[:, min([np.where(np.array(batch) == 2)[0][0] for batch in source_batch]):])
                target_batch = (target_batch[:, :(max([np.where(np.array(batch) == 2)[0][0] for batch in target_batch]) + 1)])
                #performs training
                # decoder input is the target shifted left; targets are shifted right (teacher forcing)
                _, batch_loss, batch_logits = session.run([optimizer, loss, logits], feed_dict={inputs:source_batch, outputs:target_batch[:, :-1], targets:target_batch[:, 1:], keep_rate:[0.8], decoder_size:[target_batch.shape[1]]})
                # token-level accuracy against the shifted targets
                accuracy = np.mean(batch_logits.argmax(axis=-1) == target_batch[:, 1:])
                print('\tEpoch {:3} Loss: {:>6.3f} Accuracy: {:>6.4f}'.format(epoch_i, batch_loss, accuracy))
        #saves the model
        if (not(iteration_i % 10)):
            try:
                saver.save(session, (modelDir + str(iteration_i) + modelFileName))
                save_path = saver.save(session, (modelDir + modelFileName))
            except:
                # checkpoint dir may not exist yet on the first save
                os.makedirs(modelDir)
                saver.save(session, (modelDir + str(iteration_i) + modelFileName))
                save_path = saver.save(session, (modelDir + modelFileName))
            print("save path: {0}".format(save_path))
def prep_test_data(text, original_DNS, max_length):
    """Clean one raw input string and encode it for the encoder.

    Shortened inference-time version of convert_text: cleans via
    filter_symbols_test, wraps in <GO>/<EOS> ids (OOV -> <UNK>),
    truncates to max_length, reverses, and front-pads with <PAD>.

    Returns a list of max_length (or fewer, if never padded) token ids.
    """
    mapping = original_DNS['forward']
    # clean the text, then map each word to its id (unknowns -> <UNK>)
    encoded = [mapping['<GO>']]
    encoded += [mapping.get(word, mapping['<UNK>']) for word in filter_symbols_test(text)]
    encoded.append(mapping['<EOS>'])
    # truncate (forcing a trailing <EOS>), reverse, and front-pad
    truncated = encoded[:max_length]
    truncated[-1] = mapping['<EOS>']
    truncated.reverse()
    padding = max_length - len(truncated)
    if padding > 0:
        truncated[0:0] = [mapping['<PAD>']] * padding
    return truncated
def test(original, translation, original_DNS, translated_DNS, max_original_length, max_translated_length, nodes, embed_size, batch_size, modelDir, modelFileName, isTranslate):
    """Restore the checkpointed model and either evaluate or translate.

    isTranslate == 0: greedily decode each (original, translation) pair
    and report smoothed sentence-level BLEU (and the corpus mean).
    isTranslate != 0: run an infinite interactive console loop, reading
    a sentence from stdin and printing its greedy translation.
    """
    optimizer, loss, logits, inputs, outputs, targets, keep_rate, input_embedding, output_embedding, encoder_embedding, decoder_embedding, decoder_size = build_model(max_original_length, max_translated_length, len(original_DNS['forward']), len(translated_DNS['forward']), embed_size, nodes, batch_size)
    saver = tf.train.Saver()
    #Use the GPU and some items may not be able to run on the GPU, so use soft placement
    with tf.device('/device:GPU:0'), tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        saver.restore(session, (modelDir + modelFileName))
        # smoothing avoids zero BLEU on short sentences with no higher-order n-gram overlap
        sfun = SmoothingFunction()
        results = list()
        #This is for testing on a dataset
        if (not(isTranslate)):
            for z, (x, y) in enumerate(list(zip(original, translation))):
                #dec_input will be iteratively updated with each prediction and feb back into the model
                dec_input = np.array([])
                #first token is the <GO> token
                dec_input = np.append(dec_input, translated_DNS['forward']['<GO>'])
                #Iterate for the max number of tokens
                for i in range(1, max_translated_length):
                    #input the parameters into the model and get a result
                    # np.trim_zeros strips the <PAD> (id 0) tokens before feeding
                    batch_logits = session.run(logits, feed_dict={inputs: [np.trim_zeros(x)], outputs: [dec_input], keep_rate:[1.0], decoder_size:[dec_input.shape[0]]})
                    #append the result to dec_inputs
                    # greedy decoding: take the argmax of the last timestep only
                    dec_input = np.append(dec_input, (batch_logits[:, -1].argmax(axis=-1)[0]))
                    #if the prediction was <EOS>...break out of loop
                    if translated_DNS['backward'][dec_input[i]] == '<EOS>': break
                #convert from numbers -> words and compute smoothed BLEU score
                input_ = [original_DNS['backward'][k] for k in np.trim_zeros(x)]
                predicted = [translated_DNS['backward'][k] for k in dec_input]
                actual = [translated_DNS['backward'][k] for k in np.trim_zeros(y)]
                results.append((input_, actual, predicted, (nltk.translate.bleu_score.sentence_bleu([actual], predicted, smoothing_function=sfun.method1) * 100)))
                print("{0}/{1}...BLEU Score: {2}".format(z, len(translation), results[-1][-1]))
            #print the mean BLEU score
            print(np.mean([x[-1] for x in results]))
        else:
            #this is for using it to translate a given input in an interactive session
            with open((modelDir + 'parameters.txt'), 'rb') as file: params = pickle.loads(file.read())
            while(1):
                #get input from console, prepare the input for translateion, then translate it
                original = [prep_test_data(input("> "), params['original_DNS'], params['max_original_length'])]
                #the rest here is the same as for testing
                # `translation` is a single dummy entry here, so the zip yields one pair
                for z, (x, y) in enumerate(list(zip(original, translation))):
                    dec_input = np.array([])
                    dec_input = np.append(dec_input, translated_DNS['forward']['<GO>'])
                    for i in range(1, max_translated_length):
                        batch_logits = session.run(logits, feed_dict={inputs: [np.trim_zeros(x)], outputs: [dec_input], keep_rate: [1.0], decoder_size: [dec_input.shape[0]]})
                        dec_input = np.append(dec_input, (batch_logits[:, -1].argmax(axis=-1)[0]))
                        if translated_DNS['backward'][dec_input[i]] == '<EOS>': break
                    # print the decoded words, dropping the <GO>/<EOS> wrappers
                    print(' '.join([translated_DNS['backward'][k] for k in dec_input][1:-1]))
def testWrapper():
    """Entry point for the 'test' CLI command.

    Loads the hyper-parameters saved at training time, cleans and
    numericizes the held-out corpus, and evaluates the saved model
    (BLEU) via test().
    """
    with open((modelDir + 'parameters.txt'), 'rb') as file:
        params = pickle.load(file)
    source_sents, target_sents = clean_data(test_original, test_translated)
    enc_data, dec_data = convert_text_test(
        source_sents, target_sents,
        params['original_DNS'], params['translated_DNS'],
        params['max_original_length'], params['max_translated_length'])
    test(enc_data, dec_data,
         params['original_DNS'], params['translated_DNS'],
         params['max_original_length'], params['max_translated_length'],
         params['nodes'], params['embed_size'], params['batch_size'],
         modelDir, modelFileName, 0)
#trains the model
def train(modelDir, modelFileName):
    """Full training pipeline: clean data, build vocab + graph, train, checkpoint.

    Hyper-parameters are fixed here; they (and the vocabularies/lengths)
    are pickled to modelDir/parameters.txt so test()/translate() can
    rebuild an identical graph later.
    """
    epochs = 100
    batch_size = 1
    nodes = 256
    embed_size = 300
    ##loads and cleans the training data from file path names
    # NOTE: these locals shadow the module-level processed_original/processed_translated
    processed_original, processed_translated = clean_data(train_original, train_translated)
    #use soft placement on the GPU
    with tf.device('/device:GPU:0'), tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
        #convert the data to numeric representation
        # vocabulary frequency cutoff of 15 occurrences
        original_DNS, translated_DNS, encoder_input_data, decoder_input_data, max_original_length, max_translated_length, min_length, unk_original_limit, unk_translated_limit = convert_text(processed_original, processed_translated, 15)
        encoder_input_data = np.array([x for x in encoder_input_data])
        decoder_input_data = np.array([np.array(x) for x in decoder_input_data])
        #builds the model used for training/inference
        optimizer, loss, logits, inputs, outputs, targets, keep_rate, input_embedding, output_embedding, encoder_embedding, decoder_embedding, decoder_size = build_model(max_original_length, max_translated_length, len(original_DNS['forward']), len(translated_DNS['forward']), embed_size, nodes, batch_size)
        #saves the parameters for this session and creates a saver object
        saver = tf.train.Saver()
        with open((modelDir + 'parameters.txt'), 'wb') as file: pickle.dump({'original_DNS': original_DNS, 'translated_DNS': translated_DNS, 'max_original_length': max_original_length, 'max_translated_length': max_translated_length, 'min_length': min_length, 'batch_size': batch_size, 'nodes': nodes, 'embed_size': embed_size}, file)
        #trains and saves the model
        train_and_save(encoder_input_data, decoder_input_data, optimizer, loss, logits, keep_rate, epochs, batch_size, inputs, outputs, targets, decoder_size, session, modelDir, modelFileName, saver)
def trainWrapper():
    """Entry point for the 'train' CLI command: train using the module-level checkpoint paths."""
    return train(modelDir, modelFileName)
def translate():
    """Entry point for the 'translate' CLI command.

    Restores the saved hyper-parameters and starts the interactive
    translation loop in test() (isTranslate=1); the dummy [[None]]
    translation list gives the loop exactly one pair per input line.
    """
    with open((modelDir + 'parameters.txt'), 'rb') as file:
        params = pickle.load(file)
    test(None, [[None]],
         params['original_DNS'], params['translated_DNS'],
         params['max_original_length'], params['max_translated_length'],
         params['nodes'], params['embed_size'], params['batch_size'],
         modelDir, modelFileName, 1)
# CLI entry point: expects one argument ('train', 'test', or 'translate')
# and dispatches to the matching wrapper function.
if ((__name__ == '__main__') and (len(sys.argv) > 1)):
    code = {'train': 0, 'test': 1, 'translate': 2}
    # map the command name to an index, then call the corresponding function
    {0 : trainWrapper, 1: testWrapper, 2: translate}[code[sys.argv[1]]]()
| [
"_pickle.dump",
"numpy.array",
"tensorflow.contrib.rnn.LSTMCell",
"numpy.arange",
"tensorflow.nn.embedding_lookup",
"nltk.translate.bleu_score.SmoothingFunction",
"numpy.mean",
"nltk.corpus.stopwords.words",
"tensorflow.placeholder",
"tensorflow.concat",
"pandas.DataFrame",
"tensorflow.ConfigP... | [((822, 855), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (845, 855), False, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((2734, 2753), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2751, 2753), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((3607, 3626), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (3624, 3626), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((4555, 4561), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (4559, 4561), False, 'from multiprocessing import Pool\n'), ((8024, 8079), 'pandas.DataFrame', 'pd.DataFrame', (['original_text_lengths'], {'columns': "['counts']"}), "(original_text_lengths, columns=['counts'])\n", (8036, 8079), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8316, 8373), 'pandas.DataFrame', 'pd.DataFrame', (['translated_text_lengths'], {'columns': "['counts']"}), "(translated_text_lengths, columns=['counts'])\n", (8328, 8373), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8511, 8597), 'pandas.DataFrame', 'pd.DataFrame', (['(original_text_lengths + translated_text_lengths)'], {'columns': "['counts']"}), "(original_text_lengths + translated_text_lengths, columns=[\n 'counts'])\n", (8523, 8597), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8856, 8909), 'pandas.DataFrame', 'pd.DataFrame', (['original_unk_counts'], {'columns': "['counts']"}), "(original_unk_counts, columns=['counts'])\n", (8868, 8909), True, 'import pandas as pd, 
numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((9195, 9250), 'pandas.DataFrame', 'pd.DataFrame', (['translated_unk_counts'], {'columns': "['counts']"}), "(translated_unk_counts, columns=['counts'])\n", (9207, 9250), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13337, 13385), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None, None)', '"""inputs"""'], {}), "(tf.int32, (None, None), 'inputs')\n", (13351, 13385), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13422, 13470), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None, None)', '"""output"""'], {}), "(tf.int32, (None, None), 'output')\n", (13436, 13470), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13485, 13534), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None, None)', '"""targets"""'], {}), "(tf.int32, (None, None), 'targets')\n", (13499, 13534), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13551, 13593), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(1)', '"""keep_rate"""'], {}), "(tf.float32, 1, 'keep_rate')\n", (13565, 13593), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13615, 13658), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(1)', '"""decoder_size"""'], {}), "(tf.int32, 1, 'decoder_size')\n", (13629, 13658), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, 
_pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13995, 14042), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['input_embedding', 'inputs'], {}), '(input_embedding, inputs)\n', (14017, 14042), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((14067, 14116), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['output_embedding', 'outputs'], {}), '(output_embedding, outputs)\n', (14089, 14116), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((15447, 15520), 'tensorflow.layers.dense', 'tf.layers.dense', (['prev_input'], {'units': 'translated_vocab_length', 'use_bias': '(True)'}), '(prev_input, units=translated_vocab_length, use_bias=True)\n', (15462, 15520), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((19385, 19401), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (19399, 19401), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((2689, 2715), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2704, 2715), False, 'from nltk.corpus import stopwords\n'), ((3562, 3588), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (3577, 3588), False, 'from nltk.corpus import stopwords\n'), ((4876, 4902), '_pickle.dump', 'pickle.dump', (['cleaned', 'file'], {}), '(cleaned, file)\n', (4887, 4902), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((5359, 5385), 'nltk.corpus.stopwords.words', 'stopwords.words', 
(['"""english"""'], {}), "('english')\n", (5374, 5385), False, 'from nltk.corpus import stopwords\n'), ((8110, 8152), 'numpy.percentile', 'np.percentile', (['original_text_pd.counts', '(90)'], {}), '(original_text_pd.counts, 90)\n', (8123, 8152), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8406, 8450), 'numpy.percentile', 'np.percentile', (['translated_text_pd.counts', '(90)'], {}), '(translated_text_pd.counts, 90)\n', (8419, 8450), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8614, 8646), 'numpy.percentile', 'np.percentile', (['data_pd.counts', '(5)'], {}), '(data_pd.counts, 5)\n', (8627, 8646), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8939, 8975), 'numpy.percentile', 'np.percentile', (['original_pd.counts', '(5)'], {}), '(original_pd.counts, 5)\n', (8952, 8975), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((9282, 9320), 'numpy.percentile', 'np.percentile', (['translated_pd.counts', '(5)'], {}), '(translated_pd.counts, 5)\n', (9295, 9320), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((10940, 10976), 'numpy.array', 'np.array', (['cleaned_truncated_original'], {}), '(cleaned_truncated_original)\n', (10948, 10976), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((10978, 11016), 'numpy.array', 'np.array', (['cleaned_truncated_translated'], {}), '(cleaned_truncated_translated)\n', (10986, 11016), True, 
'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13034, 13067), 'numpy.array', 'np.array', (['truncated_original_text'], {}), '(truncated_original_text)\n', (13042, 13067), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13069, 13104), 'numpy.array', 'np.array', (['truncated_translated_text'], {}), '(truncated_translated_text)\n', (13077, 13104), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13756, 13821), 'tensorflow.random_uniform', 'tf.random_uniform', (['(original_vocab_length, embed_size)', '(-1.0)', '(1.0)'], {}), '((original_vocab_length, embed_size), -1.0, 1.0)\n', (13773, 13821), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((13880, 13947), 'tensorflow.random_uniform', 'tf.random_uniform', (['(translated_vocab_length, embed_size)', '(-1.0)', '(1.0)'], {}), '((translated_vocab_length, embed_size), -1.0, 1.0)\n', (13897, 13947), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((14957, 15004), 'tensorflow.concat', 'tf.concat', (['(forward_output, backward_output)', '(1)'], {}), '((forward_output, backward_output), 1)\n', (14966, 15004), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((15628, 15670), 'tensorflow.ones', 'tf.ones', (['[batch_size, decoder_size[0] - 1]'], {}), '([batch_size, decoder_size[0] - 1])\n', (15635, 15670), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, 
_pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((16113, 16146), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16144, 16146), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((19501, 19527), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (19510, 19527), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((19676, 19695), 'nltk.translate.bleu_score.SmoothingFunction', 'SmoothingFunction', ([], {}), '()\n', (19693, 19695), False, 'from nltk.translate.bleu_score import SmoothingFunction\n'), ((23807, 23833), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (23816, 23833), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((24225, 24266), 'numpy.array', 'np.array', (['[x for x in encoder_input_data]'], {}), '([x for x in encoder_input_data])\n', (24233, 24266), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((24801, 24817), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (24815, 24817), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((10501, 10518), 'collections.Counter', 'Counter', (['original'], {}), '(original)\n', (10508, 10518), False, 'from collections import Counter\n'), ((10520, 10539), 'collections.Counter', 'Counter', (['translated'], {}), '(translated)\n', (10527, 10539), False, 'from collections import Counter\n'), ((14312, 14342), 'tensorflow.contrib.rnn.LSTMCell', 
'tf.contrib.rnn.LSTMCell', (['nodes'], {}), '(nodes)\n', (14335, 14342), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((14457, 14487), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['nodes'], {}), '(nodes)\n', (14480, 14487), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((15161, 15195), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['(2 * nodes)'], {}), '(2 * nodes)\n', (15184, 15195), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((15690, 15722), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (15715, 15722), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((24882, 25163), '_pickle.dump', 'pickle.dump', (["{'original_DNS': original_DNS, 'translated_DNS': translated_DNS,\n 'max_original_length': max_original_length, 'max_translated_length':\n max_translated_length, 'min_length': min_length, 'batch_size':\n batch_size, 'nodes': nodes, 'embed_size': embed_size}", 'file'], {}), "({'original_DNS': original_DNS, 'translated_DNS': translated_DNS,\n 'max_original_length': max_original_length, 'max_translated_length':\n max_translated_length, 'min_length': min_length, 'batch_size':\n batch_size, 'nodes': nodes, 'embed_size': embed_size}, file)\n", (24893, 25163), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((8785, 8802), 'collections.Counter', 'Counter', (['sentence'], {}), '(sentence)\n', (8792, 8802), False, 'from collections import Counter\n'), 
((9120, 9137), 'collections.Counter', 'Counter', (['sentence'], {}), '(sentence)\n', (9127, 9137), False, 'from collections import Counter\n'), ((14832, 14881), 'tensorflow.concat', 'tf.concat', (['(forward_state.c, backward_state.c)', '(1)'], {}), '((forward_state.c, backward_state.c), 1)\n', (14841, 14881), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((14885, 14934), 'tensorflow.concat', 'tf.concat', (['(forward_state.h, backward_state.h)', '(1)'], {}), '((forward_state.h, backward_state.h), 1)\n', (14894, 14934), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((16514, 16552), 'numpy.arange', 'np.arange', (['encoder_input_data.shape[0]'], {}), '(encoder_input_data.shape[0])\n', (16523, 16552), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((19547, 19588), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (19561, 19588), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((20001, 20013), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (20009, 20013), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((20090, 20145), 'numpy.append', 'np.append', (['dec_input', "translated_DNS['forward']['<GO>']"], {}), "(dec_input, translated_DNS['forward']['<GO>'])\n", (20099, 20145), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((21440, 21473), 'numpy.mean', 'np.mean', 
(['[x[-1] for x in results]'], {}), '([x[-1] for x in results])\n', (21447, 21473), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((23853, 23894), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (23867, 23894), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((24306, 24317), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (24314, 24317), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((17829, 17850), 'os.makedirs', 'os.makedirs', (['modelDir'], {}), '(modelDir)\n', (17840, 17850), False, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((22076, 22088), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22084, 22088), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((22121, 22176), 'numpy.append', 'np.append', (['dec_input', "translated_DNS['forward']['<GO>']"], {}), "(dec_input, translated_DNS['forward']['<GO>'])\n", (22130, 22176), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((5685, 5708), 'collections.Counter', 'Counter', (['original_words'], {}), '(original_words)\n', (5692, 5708), False, 'from collections import Counter\n'), ((5857, 5882), 'collections.Counter', 'Counter', (['translated_words'], {}), '(translated_words)\n', (5864, 5882), False, 'from collections import Counter\n'), ((20944, 20960), 'numpy.trim_zeros', 'np.trim_zeros', (['x'], {}), 
'(x)\n', (20957, 20960), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((21106, 21122), 'numpy.trim_zeros', 'np.trim_zeros', (['y'], {}), '(y)\n', (21119, 21122), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((21184, 21281), 'nltk.translate.bleu_score.sentence_bleu', 'nltk.translate.bleu_score.sentence_bleu', (['[actual]', 'predicted'], {'smoothing_function': 'sfun.method1'}), '([actual], predicted,\n smoothing_function=sfun.method1)\n', (21223, 21281), False, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((20408, 20424), 'numpy.trim_zeros', 'np.trim_zeros', (['x'], {}), '(x)\n', (20421, 20424), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((22319, 22335), 'numpy.trim_zeros', 'np.trim_zeros', (['x'], {}), '(x)\n', (22332, 22335), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((16911, 16926), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (16919, 16926), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n'), ((17034, 17049), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (17042, 17049), True, 'import pandas as pd, numpy as np, tensorflow as tf, re, time, sys, contractions, _pickle as pickle, os, nltk, random, string, warnings, os, sys\n')] |
from reactopya import Component
import numpy as np
class InteractivePlotly(Component):
def __init__(self):
super().__init__()
def javascript_state_changed(self, prev_state, state):
noise_level = state.get('noise_level', 0)
num_points = state.get('num_points', 10)
times0 = np.linspace(0, 100, num_points)
amplitudes0 = times0 + np.random.normal(0, 1, times0.shape) * noise_level
self.set_python_state(dict(
series=[dict(
times=times0,
amplitudes=amplitudes0
)]
))
| [
"numpy.random.normal",
"numpy.linspace"
] | [((315, 346), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', 'num_points'], {}), '(0, 100, num_points)\n', (326, 346), True, 'import numpy as np\n'), ((378, 414), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'times0.shape'], {}), '(0, 1, times0.shape)\n', (394, 414), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a list of sensors to gym space."""
import gym
from gym import spaces
import numpy as np
import typing
from locomotion.envs.sensors import sensor
class UnsupportedConversionError(Exception):
"""An exception when the function cannot convert sensors to gym space."""
class AmbiguousDataTypeError(Exception):
"""An exception when the function cannot determine the data type."""
def convert_sensors_to_gym_space(
sensors: typing.List[sensor.Sensor]) -> gym.Space:
"""Convert a list of sensors to the corresponding gym space.
Args:
sensors: a list of the current sensors
Returns:
space: the converted gym space
Raises:
UnsupportedConversionError: raises when the function cannot convert the
given list of sensors.
"""
if all([
isinstance(s, sensor.BoxSpaceSensor) and s.get_dimension() == 1
for s in sensors
]):
return convert_1d_box_sensors_to_gym_space(sensors)
raise UnsupportedConversionError('sensors = ' + str(sensors))
def convert_1d_box_sensors_to_gym_space(
sensors: typing.List[sensor.Sensor]) -> gym.Space:
"""Convert a list of 1D BoxSpaceSensors to the corresponding gym space.
Args:
sensors: a list of the current sensors
Returns:
space: the converted gym space
Raises:
UnsupportedConversionError: raises when the function cannot convert the
given list of sensors.
AmbiguousDataTypeError: raises when the function cannot determine the
data types because they are not uniform.
"""
# Check if all sensors are 1D BoxSpaceSensors
if not all([
isinstance(s, sensor.BoxSpaceSensor) and s.get_dimension() == 1
for s in sensors
]):
raise UnsupportedConversionError('sensors = ' + str(sensors))
# Check if all sensors have the same data type
sensor_dtypes = [s.get_dtype() for s in sensors]
if sensor_dtypes.count(sensor_dtypes[0]) != len(sensor_dtypes):
raise AmbiguousDataTypeError('sensor datatypes are inhomogeneous')
lower_bound = np.concatenate([s.get_lower_bound() for s in sensors])
upper_bound = np.concatenate([s.get_upper_bound() for s in sensors])
observation_space = spaces.Box(np.array(lower_bound),
np.array(upper_bound),
dtype=np.float32)
return observation_space
def convert_sensors_to_gym_space_dictionary(
sensors: typing.List[sensor.Sensor]) -> gym.Space:
"""Convert a list of sensors to the corresponding gym space dictionary.
Args:
sensors: a list of the current sensors
Returns:
space: the converted gym space dictionary
Raises:
UnsupportedConversionError: raises when the function cannot convert the
given list of sensors.
"""
gym_space_dict = {}
for s in sensors:
if isinstance(s, sensor.BoxSpaceSensor):
gym_space_dict[s.get_name()] = spaces.Box(np.array(s.get_lower_bound()),
np.array(s.get_upper_bound()),
dtype=np.float32)
else:
raise UnsupportedConversionError('sensors = ' + str(sensors))
return spaces.Dict(gym_space_dict)
| [
"numpy.array",
"gym.spaces.Dict"
] | [((3727, 3754), 'gym.spaces.Dict', 'spaces.Dict', (['gym_space_dict'], {}), '(gym_space_dict)\n', (3738, 3754), False, 'from gym import spaces\n'), ((2764, 2785), 'numpy.array', 'np.array', (['lower_bound'], {}), '(lower_bound)\n', (2772, 2785), True, 'import numpy as np\n'), ((2820, 2841), 'numpy.array', 'np.array', (['upper_bound'], {}), '(upper_bound)\n', (2828, 2841), True, 'import numpy as np\n')] |
import asyncio
import numpy as np
import json
import logging
import datetime
import math
from fifo import Fifo
from run_task import run_task
import location_mapper
ACCEL_FIFO_CAPACITY = 6000 # number of samples in Acceleration Data Fifo ~1 minute
ACCEL_NOMINAL_SAMPLE_PERIOD = 0.01 # sample rate we except the samples to arrive
LOC_FIFO_CAPACITY = 60 # number of samples in Location Fifo
RMS_WINDOW_SIZE = 80 # number of samples over which RMS is computed
PEAK_THRESHOLD = 0.42 # if RMS value above that threshold, send ADS message
MAP_TIMESTAMP_TIMEOUT_SECONDS = 60
_logger = logging.getLogger(__name__)
class Analyzer:
def __init__(self, q, mqtt_client):
self._q = q
self._mqtt_client = mqtt_client
self._logic = AnalyzerLogic(
RMS_WINDOW_SIZE, ACCEL_NOMINAL_SAMPLE_PERIOD)
# communication between accel_handler and monitor task
self._accel_fifo = Fifo((ACCEL_FIFO_CAPACITY, 2))
self._accel_q = asyncio.Queue()
self._loc_fifo = Fifo((LOC_FIFO_CAPACITY, 3))
self._task = asyncio.create_task(run_task(_logger, q, self._monitor))
async def _accel_handler(self, msg):
"""This is the handler function that gets registered for `environment/acceleration`.
The received data is a python dictionary.
msg['payload'] is the MQTT message as received from MQTT. Here, the payload is
a json message, so we convert the json to a python dictionary.
"""
payload = json.loads(msg["payload"])
# Z-acceleration + timestamps to np-array
accel = payload["accel"]
n = len(accel)
ts = payload["time"]
td = payload["time_delta"]
arr = np.ndarray((n, 2))
for i in range(n):
arr[i, 0] = ts + td * i
arr[i, 1] = accel[i]["z"]
age = datetime.datetime.now() - datetime.datetime.fromtimestamp(float(ts))
_logger.debug(
f"Place {n} accel samples into fifo. age={age.total_seconds()} s")
# Push into Fifo
try:
self._accel_fifo.push(arr)
await self._accel_q.put(0)
except BufferError as e:
_logger.error(f"Accel Fifo: {e}")
async def _loc_handler(self, msg):
"""This is the handler function that gets registered for `environment/location`"""
payload = json.loads(msg["payload"])
_logger.debug(f"loc_handler {payload}")
try:
entry = np.array([payload["time"], payload["lat"], payload["lon"]])
self._loc_fifo.push(entry)
except BufferError as e:
_logger.error(f"Loc Fifo: {e}")
async def register_handlers(self):
await self._mqtt_client.subscribe(
"environment/acceleration", self._accel_handler
)
await self._mqtt_client.subscribe("environment/location", self._loc_handler)
async def _monitor(self):
await self.register_handlers()
while True:
# wait for accel_handler to put something into FIFO
await self._accel_q.get()
while True:
try:
accel = self._accel_fifo.pop(RMS_WINDOW_SIZE)
_logger.debug(
f"Got {RMS_WINDOW_SIZE} accel samples from fifo.")
ts, rms = self._logic.analyze(accel)
if ts is not None:
map_status, lat, lon = await self.map_timestamp(ts)
_logger.info(f"RMS={rms} at ts={ts} loc={lat}, {lon}")
# Discard location entries that are too old
self.clean_locs_fifo(ts)
if map_status == "ok" and rms > PEAK_THRESHOLD:
# report peak to main task
_logger.info("*** Peak detected!")
await self._q.put(
{
"time": datetime.datetime.fromtimestamp(float(ts)),
"lat": lat,
"lon": lon,
"vibrationIntensity": rms,
}
)
except BufferError:
# not enough data in fifo, wait for more
break
async def map_timestamp(self, ts):
"""
Map the timestamp to a location position.
If ts is newer than all received locations, wait for newer locations
:param ts: timestamp to map
:return: map_status, lat, lon
"ok" - mapping done - lat, lon valid
"ts-too-old" mapping failed - ts is older than all timestamps in locs
"time-gap" mapping failed - time gap in accel
"""
if ts is None:
map_status = "time-gap"
lat = lon = None
else:
elapsed_time_seconds = 0
while elapsed_time_seconds < MAP_TIMESTAMP_TIMEOUT_SECONDS:
locs = self._loc_fifo.peek(self._loc_fifo.entries())
lat, lon, map_status = location_mapper.map_timestamp_to_location(
ts, locs
)
if map_status == "ts-too-new":
# no location data available. Let's wait
_logger.info("Wait for newer location data")
await asyncio.sleep(1)
elapsed_time_seconds += 1
else:
break
if map_status != "ok":
_logger.warn(f"Could not map timestamp to location: {map_status}")
return map_status, lat, lon
def clean_locs_fifo(self, ts):
locs = self._loc_fifo.peek(self._loc_fifo.entries())
unused_loc_idx = location_mapper.find_last_unused_location_entry(
ts, locs)
if unused_loc_idx is not None:
self._loc_fifo.pop(unused_loc_idx + 1)
class AnalyzerLogic:
def __init__(self, rms_window_size, accel_nominal_sample_period):
"""
Stateless logic of Analyzer
"""
self._rms_window_size = rms_window_size
self._accel_nominal_sample_period = accel_nominal_sample_period
def analyze(self, accel):
"""
Determine RMS value of <accel>.
:param accel: chunk of RMS_WINDOW_SIZE z-acceleration samples
:return: ts, rms
<ts> timestamp of middle entry of accel - none if accel data has time gaps
<rms> rms value of accel
"""
rms = None
ts = None
# ensure all data in the buffer is from the "same" moment
if self._has_time_gaps(accel):
_logger.error("Time gap. Not analyzing data")
else:
# Compute RMS value
rms = self._analyze_chunk(accel)
# Get timestamp in the middle of the window
ts = accel[int(len(accel) / 2), 0]
return ts, rms
@staticmethod
def _analyze_chunk(accel):
"""
Build the RMS over differential over the z-acceleration
:return: rms
"""
# compute differential
d_accel = np.ndarray((len(accel)))
d_accel = np.diff(accel[:, 1], prepend=accel[0, 1])
# compute RMS value over differential
rms = AnalyzerLogic._calculate_rms(d_accel)
return rms
@staticmethod
def _calculate_rms(chunk):
chunk = pow(abs(chunk), 2)
return math.sqrt(chunk.mean())
def _has_time_gaps(self, accel):
dt = accel[-1, 0] - accel[0, 0]
_logger.debug(f"time_gap={dt}")
return dt > (self._accel_nominal_sample_period * self._rms_window_size * 1.1)
@staticmethod
def _map_timestamp_to_location(ts, locs):
"""
Find the location (lat,lon) at time <ts> in <locs>.
<locs> must be array with entries [time,lat,lon], oldest entry first
:return: lat, lon, status
<lat>, <lon>: averaged position at time (linear interpolation)
<status>: "ok" - mapping done
"ts-too-new" ts is newer than all timestamps in locs, or locs is empty
"ts-too-old" ts is older than all timestamps in locs
"""
status = "ts-too-old" if len(locs) > 0 else "ts-too-new"
for idx in range(len(locs)):
try:
if ts >= locs[idx, 0] and ts < locs[idx + 1, 0]:
td = ts - locs[idx, 0]
f = td / (locs[idx + 1, 0] - locs[idx, 0])
lat = (locs[idx + 1, 1] - locs[idx, 1]) * f + locs[idx, 1]
lon = (locs[idx + 1, 2] - locs[idx, 2]) * f + locs[idx, 2]
return lat, lon, "ok"
except IndexError:
status = "ts-too-new"
break
if len(locs > 0):
_logger.error(f"oldest: {ts-locs[0,0]} newest: {ts-locs[-1,0]}")
return None, None, status
@staticmethod
def _find_last_unused_location_entry(ts, locs):
"""
Find the last location entry that is no more used.
i.e. the last entry that is older than the one just before <ts>.
<locs> must be array with entries [time,lat,lon], oldest entry first
:return: idx or None
"""
idx = 0
for idx in range(len(locs)):
try:
if ts >= locs[idx, 0] and ts < locs[idx + 1, 0]:
break
except IndexError:
break
if idx > 0:
return idx - 1
| [
"logging.getLogger",
"json.loads",
"fifo.Fifo",
"asyncio.Queue",
"numpy.diff",
"location_mapper.find_last_unused_location_entry",
"location_mapper.map_timestamp_to_location",
"datetime.datetime.now",
"run_task.run_task",
"numpy.array",
"numpy.ndarray",
"asyncio.sleep"
] | [((586, 613), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (603, 613), False, 'import logging\n'), ((917, 947), 'fifo.Fifo', 'Fifo', (['(ACCEL_FIFO_CAPACITY, 2)'], {}), '((ACCEL_FIFO_CAPACITY, 2))\n', (921, 947), False, 'from fifo import Fifo\n'), ((972, 987), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (985, 987), False, 'import asyncio\n'), ((1014, 1042), 'fifo.Fifo', 'Fifo', (['(LOC_FIFO_CAPACITY, 3)'], {}), '((LOC_FIFO_CAPACITY, 3))\n', (1018, 1042), False, 'from fifo import Fifo\n'), ((1495, 1521), 'json.loads', 'json.loads', (["msg['payload']"], {}), "(msg['payload'])\n", (1505, 1521), False, 'import json\n'), ((1707, 1725), 'numpy.ndarray', 'np.ndarray', (['(n, 2)'], {}), '((n, 2))\n', (1717, 1725), True, 'import numpy as np\n'), ((2359, 2385), 'json.loads', 'json.loads', (["msg['payload']"], {}), "(msg['payload'])\n", (2369, 2385), False, 'import json\n'), ((5837, 5894), 'location_mapper.find_last_unused_location_entry', 'location_mapper.find_last_unused_location_entry', (['ts', 'locs'], {}), '(ts, locs)\n', (5884, 5894), False, 'import location_mapper\n'), ((7250, 7291), 'numpy.diff', 'np.diff', (['accel[:, 1]'], {'prepend': 'accel[0, 1]'}), '(accel[:, 1], prepend=accel[0, 1])\n', (7257, 7291), True, 'import numpy as np\n'), ((1085, 1120), 'run_task.run_task', 'run_task', (['_logger', 'q', 'self._monitor'], {}), '(_logger, q, self._monitor)\n', (1093, 1120), False, 'from run_task import run_task\n'), ((1843, 1866), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1864, 1866), False, 'import datetime\n'), ((2467, 2526), 'numpy.array', 'np.array', (["[payload['time'], payload['lat'], payload['lon']]"], {}), "([payload['time'], payload['lat'], payload['lon']])\n", (2475, 2526), True, 'import numpy as np\n'), ((5166, 5217), 'location_mapper.map_timestamp_to_location', 'location_mapper.map_timestamp_to_location', (['ts', 'locs'], {}), '(ts, locs)\n', (5207, 5217), False, 'import 
location_mapper\n'), ((5456, 5472), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (5469, 5472), False, 'import asyncio\n')] |
import tvm
import numpy as np
dtype = "float32"
A = tvm.te.placeholder([4, 4], dtype=dtype, name="A")
B = tvm.te.compute([4, 4], lambda i, j: A[i, j] + 1, name="B")
C = tvm.te.compute([4, 4], lambda i, j: A[i, j] * 2, name="C")
target = "llvm"
s1 = tvm.te.create_schedule(B.op)
s2 = tvm.te.create_schedule(C.op)
s3 = tvm.te.create_schedule([B.op, C.op])
func1 = tvm.build(s1, [A, B], target=target)
func2 = tvm.build(s2, [A, C], target=target)
func3 = tvm.build(s3, [A, B, C], target=target)
ctx = tvm.context(target)
A_np = np.random.uniform(-1, 1, [4, 4]).astype(dtype)
B_np = np.zeros([4, 4]).astype(dtype)
C_np = np.zeros([4, 4]).astype(dtype)
print("Inputs:")
print(A_np)
def run(func, id):
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
C_tvm = tvm.nd.array(C_np, ctx)
if id == 0:
func(A_tvm, B_tvm)
print("Outputs:")
print(B_tvm.asnumpy())
elif id == 1:
func(A_tvm, C_tvm)
print("Outputs:")
print(C_tvm.asnumpy())
elif id == 2:
func(A_tvm, B_tvm, C_tvm)
print("Outputs 1:")
print(B_tvm.asnumpy())
print("Outputs 2:")
print(C_tvm.asnumpy())
run(func1, 0)
run(func2, 1)
run(func3, 2)
| [
"tvm.nd.array",
"tvm.te.create_schedule",
"tvm.te.placeholder",
"tvm.build",
"tvm.context",
"numpy.zeros",
"tvm.te.compute",
"numpy.random.uniform"
] | [((55, 104), 'tvm.te.placeholder', 'tvm.te.placeholder', (['[4, 4]'], {'dtype': 'dtype', 'name': '"""A"""'}), "([4, 4], dtype=dtype, name='A')\n", (73, 104), False, 'import tvm\n'), ((110, 168), 'tvm.te.compute', 'tvm.te.compute', (['[4, 4]', '(lambda i, j: A[i, j] + 1)'], {'name': '"""B"""'}), "([4, 4], lambda i, j: A[i, j] + 1, name='B')\n", (124, 168), False, 'import tvm\n'), ((174, 232), 'tvm.te.compute', 'tvm.te.compute', (['[4, 4]', '(lambda i, j: A[i, j] * 2)'], {'name': '"""C"""'}), "([4, 4], lambda i, j: A[i, j] * 2, name='C')\n", (188, 232), False, 'import tvm\n'), ((256, 284), 'tvm.te.create_schedule', 'tvm.te.create_schedule', (['B.op'], {}), '(B.op)\n', (278, 284), False, 'import tvm\n'), ((291, 319), 'tvm.te.create_schedule', 'tvm.te.create_schedule', (['C.op'], {}), '(C.op)\n', (313, 319), False, 'import tvm\n'), ((326, 362), 'tvm.te.create_schedule', 'tvm.te.create_schedule', (['[B.op, C.op]'], {}), '([B.op, C.op])\n', (348, 362), False, 'import tvm\n'), ((372, 408), 'tvm.build', 'tvm.build', (['s1', '[A, B]'], {'target': 'target'}), '(s1, [A, B], target=target)\n', (381, 408), False, 'import tvm\n'), ((418, 454), 'tvm.build', 'tvm.build', (['s2', '[A, C]'], {'target': 'target'}), '(s2, [A, C], target=target)\n', (427, 454), False, 'import tvm\n'), ((464, 503), 'tvm.build', 'tvm.build', (['s3', '[A, B, C]'], {'target': 'target'}), '(s3, [A, B, C], target=target)\n', (473, 503), False, 'import tvm\n'), ((511, 530), 'tvm.context', 'tvm.context', (['target'], {}), '(target)\n', (522, 530), False, 'import tvm\n'), ((723, 746), 'tvm.nd.array', 'tvm.nd.array', (['A_np', 'ctx'], {}), '(A_np, ctx)\n', (735, 746), False, 'import tvm\n'), ((757, 780), 'tvm.nd.array', 'tvm.nd.array', (['B_np', 'ctx'], {}), '(B_np, ctx)\n', (769, 780), False, 'import tvm\n'), ((791, 814), 'tvm.nd.array', 'tvm.nd.array', (['C_np', 'ctx'], {}), '(C_np, ctx)\n', (803, 814), False, 'import tvm\n'), ((539, 571), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '[4, 
4]'], {}), '(-1, 1, [4, 4])\n', (556, 571), True, 'import numpy as np\n'), ((593, 609), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (601, 609), True, 'import numpy as np\n'), ((631, 647), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (639, 647), True, 'import numpy as np\n')] |
import resnet2 as net
import numpy as np
import cv2
import scipy.io as sio
import os
from os import listdir
import random
def Average(inp):
a = inp/np.linalg.norm(inp, axis=1, keepdims=True)
a = np.sum(a, axis=0)
a = a/np.linalg.norm(a)
return a
path = r'O:\[FY2017]\MS-Challenges\code\evaluation_all\models\ensemble_google_submit3\test_data\5/'
path = path.replace('\\', '/')
print (path)
NUM = 0
imglist = path + 'dev5.txt'
with open(imglist) as f:
NUM = sum(1 for _ in f)
f.close()
f = open(imglist, 'r')
res = []
imgs = []
labs = []
count = 0
n = 0
print('reading images...')
ff = []
for pic in f:
n += 1
pic = pic.strip()
lab = pic.split('\\')[-1].replace('.jpg', '')
# print (lab)
img_path2 = pic
try:
img = cv2.imread(img_path2,1)
except:
count += 1
ff.append(pic)
continue
img = cv2.resize(img,(122,144))
M2 = np.float32([[1,0,11],[0,1,0]])
img = cv2.warpAffine(img,M2,(144,144))
imgs = []
for i in range(20):
w = random.randint(0, 16)
h = random.randint(0, 16)
img2 = img[w:w+128, h:h+128]
img2 = np.float32(img2)
imgs.append(img2)
for i in range(5):
w = random.randint(0, 16)
h = random.randint(0, 16)
img2 = img[w:w+128, h:h+128]
img2 = cv2.flip(img2, 1)
img2 = np.float32(img2)
imgs.append(img2)
feas = net.eval(imgs)
feas_avg = Average(feas)
feas_avg = np.array(feas_avg)
labs.append(lab)
res.append(feas_avg)
if n % 10 == 0:
print (str(n) + '/' + str(NUM) + '\t' + str(count))
save_path = path + 'dev5_Res.mat'
sio.savemat(save_path,{'data':res, 'label':labs})
f.close() | [
"cv2.warpAffine",
"scipy.io.savemat",
"random.randint",
"cv2.flip",
"numpy.sum",
"numpy.array",
"resnet2.eval",
"numpy.linalg.norm",
"cv2.resize",
"cv2.imread",
"numpy.float32"
] | [((1570, 1622), 'scipy.io.savemat', 'sio.savemat', (['save_path', "{'data': res, 'label': labs}"], {}), "(save_path, {'data': res, 'label': labs})\n", (1581, 1622), True, 'import scipy.io as sio\n'), ((210, 227), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (216, 227), True, 'import numpy as np\n'), ((861, 888), 'cv2.resize', 'cv2.resize', (['img', '(122, 144)'], {}), '(img, (122, 144))\n', (871, 888), False, 'import cv2\n'), ((894, 929), 'numpy.float32', 'np.float32', (['[[1, 0, 11], [0, 1, 0]]'], {}), '([[1, 0, 11], [0, 1, 0]])\n', (904, 929), True, 'import numpy as np\n'), ((933, 968), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M2', '(144, 144)'], {}), '(img, M2, (144, 144))\n', (947, 968), False, 'import cv2\n'), ((1343, 1357), 'resnet2.eval', 'net.eval', (['imgs'], {}), '(imgs)\n', (1351, 1357), True, 'import resnet2 as net\n'), ((1398, 1416), 'numpy.array', 'np.array', (['feas_avg'], {}), '(feas_avg)\n', (1406, 1416), True, 'import numpy as np\n'), ((161, 203), 'numpy.linalg.norm', 'np.linalg.norm', (['inp'], {'axis': '(1)', 'keepdims': '(True)'}), '(inp, axis=1, keepdims=True)\n', (175, 203), True, 'import numpy as np\n'), ((236, 253), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (250, 253), True, 'import numpy as np\n'), ((775, 799), 'cv2.imread', 'cv2.imread', (['img_path2', '(1)'], {}), '(img_path2, 1)\n', (785, 799), False, 'import cv2\n'), ((1010, 1031), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (1024, 1031), False, 'import random\n'), ((1039, 1060), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (1053, 1060), False, 'import random\n'), ((1104, 1120), 'numpy.float32', 'np.float32', (['img2'], {}), '(img2)\n', (1114, 1120), True, 'import numpy as np\n'), ((1172, 1193), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 16)\n', (1186, 1193), False, 'import random\n'), ((1201, 1222), 'random.randint', 'random.randint', (['(0)', '(16)'], {}), '(0, 
16)\n', (1215, 1222), False, 'import random\n'), ((1266, 1283), 'cv2.flip', 'cv2.flip', (['img2', '(1)'], {}), '(img2, 1)\n', (1274, 1283), False, 'import cv2\n'), ((1294, 1310), 'numpy.float32', 'np.float32', (['img2'], {}), '(img2)\n', (1304, 1310), True, 'import numpy as np\n')] |
# %%
"""Example: build and apply an ODOG filterbank (BM1999) to a stimulus image.

Loads a greyscale stimulus, constructs a multi-scale, multi-orientation
ODOG (oriented difference-of-Gaussians) filterbank over an image-aligned
coordinate grid, applies the bank to the stimulus, and visualises both
the filters and their outputs as subplot grids (orientations x scales).
"""
# Third party libraries
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# Import local module
from multyscale import utils, filterbank

# %% Load example stimulus, converted to single-channel ("L") luminance
stimulus = np.asarray(Image.open("example_stimulus.png").convert("L"))

# %% Parameters of image
shape = stimulus.shape  # filtershape in pixels
# visual extent, same convention as pyplot imshow: (left, right, bottom, top)
visextent = (-16, 16, -16, 16)

# %% Create image coordinate system:
axish = np.linspace(visextent[0], visextent[1], shape[0])
axisv = np.linspace(visextent[2], visextent[3], shape[1])
(x, y) = np.meshgrid(axish, axisv)

# %% Filterbank parameters
# Parameters (BM1999)
n_orientations = 6
num_scales = 7
largest_center_sigma = 3  # in degrees
# Center sigmas spaced in octave intervals below the largest
center_sigmas = utils.octave_intervals(num_scales) * largest_center_sigma
cs_ratio = 2  # center-surround ratio

# Convert to filterbank parameters
orientations = np.arange(0, 180, 180 / n_orientations)
# Each entry: ((center sigma pair), (surround sigma pair)), elongated surround
sigmas = [((s, s), (s, cs_ratio * s)) for s in center_sigmas]

# %% Create filterbank
bank = filterbank.ODOGBank(orientations, sigmas, x, y)

# %% Visualise filterbank
# NOTE: the original index `i * shape[0] + (j + i) + 1` only placed subplots
# correctly because this bank happens to be 6x7 (shape[1] == shape[0] + 1).
# The canonical row-major index `i * n_cols + j + 1` works for any grid.
n_rows, n_cols = bank.filters.shape[0], bank.filters.shape[1]
for i in range(n_rows):
    for j in range(n_cols):
        plt.subplot(n_rows, n_cols, i * n_cols + j + 1)
        plt.imshow(bank.filters[i, j, ...], extent=visextent)

# %% Apply filterbank
filters_output = bank.apply(stimulus)

# %% Visualise filter bank output
n_rows, n_cols = filters_output.shape[0], filters_output.shape[1]
for i in range(n_rows):
    for j in range(n_cols):
        plt.subplot(n_rows, n_cols, i * n_cols + j + 1)
        plt.imshow(filters_output[i, j, ...], extent=visextent)
| [
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"multyscale.filterbank.ODOGBank",
"multyscale.utils.octave_intervals",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.subplot",
"numpy.arange"
] | [((460, 509), 'numpy.linspace', 'np.linspace', (['visextent[0]', 'visextent[1]', 'shape[0]'], {}), '(visextent[0], visextent[1], shape[0])\n', (471, 509), True, 'import numpy as np\n'), ((518, 567), 'numpy.linspace', 'np.linspace', (['visextent[2]', 'visextent[3]', 'shape[1]'], {}), '(visextent[2], visextent[3], shape[1])\n', (529, 567), True, 'import numpy as np\n'), ((578, 603), 'numpy.meshgrid', 'np.meshgrid', (['axish', 'axisv'], {}), '(axish, axisv)\n', (589, 603), True, 'import numpy as np\n'), ((890, 929), 'numpy.arange', 'np.arange', (['(0)', '(180)', '(180 / n_orientations)'], {}), '(0, 180, 180 / n_orientations)\n', (899, 929), True, 'import numpy as np\n'), ((1023, 1070), 'multyscale.filterbank.ODOGBank', 'filterbank.ODOGBank', (['orientations', 'sigmas', 'x', 'y'], {}), '(orientations, sigmas, x, y)\n', (1042, 1070), False, 'from multyscale import utils, filterbank\n'), ((743, 777), 'multyscale.utils.octave_intervals', 'utils.octave_intervals', (['num_scales'], {}), '(num_scales)\n', (765, 777), False, 'from multyscale import utils, filterbank\n'), ((1188, 1295), 'matplotlib.pyplot.subplot', 'plt.subplot', (['bank.filters.shape[0]', 'bank.filters.shape[1]', '(i * bank.filters.shape[0] + (j + i) * 1 + 1)'], {}), '(bank.filters.shape[0], bank.filters.shape[1], i * bank.filters.\n shape[0] + (j + i) * 1 + 1)\n', (1199, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1401), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bank.filters[i, j, ...]'], {'extent': 'visextent'}), '(bank.filters[i, j, ...], extent=visextent)\n', (1358, 1401), True, 'import matplotlib.pyplot as plt\n'), ((1592, 1704), 'matplotlib.pyplot.subplot', 'plt.subplot', (['filters_output.shape[0]', 'filters_output.shape[1]', '(i * filters_output.shape[0] + (j + i) * 1 + 1)'], {}), '(filters_output.shape[0], filters_output.shape[1], i *\n filters_output.shape[0] + (j + i) * 1 + 1)\n', (1603, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1758, 1813), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['filters_output[i, j, ...]'], {'extent': 'visextent'}), '(filters_output[i, j, ...], extent=visextent)\n', (1768, 1813), True, 'import matplotlib.pyplot as plt\n'), ((216, 250), 'PIL.Image.open', 'Image.open', (['"""example_stimulus.png"""'], {}), "('example_stimulus.png')\n", (226, 250), False, 'from PIL import Image\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.